code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = analyze_text(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = list(" " + ascii_lowercase )
# what is our total sum of probabilities.
_UpperCAmelCase : Optional[int] = sum(single_char_strings.values() )
# one length string
_UpperCAmelCase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_UpperCAmelCase : Tuple = single_char_strings[ch]
_UpperCAmelCase : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(__lowerCAmelCase ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
_UpperCAmelCase : Tuple = sum(two_char_strings.values() )
_UpperCAmelCase : List[str] = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_UpperCAmelCase : str = cha + cha
if sequence in two_char_strings:
_UpperCAmelCase : List[str] = two_char_strings[sequence]
_UpperCAmelCase : Any = int(__lowerCAmelCase ) / all_sum
my_sec_sum += prob * math.loga(__lowerCAmelCase )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = Counter() # type: ignore
_UpperCAmelCase : Union[str, Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __lowerCAmelCase ():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 361
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCamelCase__ = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
| 322
| 0
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowerCamelCase__ = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
lowerCamelCase__ = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
lowerCamelCase__ = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
_UpperCAmelCase : Any = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
_UpperCAmelCase : Tuple = evaluate(dataset=lowerCamelCase__ , predictions=lowerCamelCase__ )
return score
| 362
|
'''simple docstring'''
from collections.abc import Sequence
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return sum(c * (x**i) for i, c in enumerate(__lowerCAmelCase ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = 0.0
for coeff in reversed(__lowerCAmelCase ):
_UpperCAmelCase : int = result * x + coeff
return result
if __name__ == "__main__":
lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 322
| 0
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : DDPMScheduler , lowerCamelCase__ : Optional[Any] , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[str] = value_function
_UpperCAmelCase : Dict = unet
_UpperCAmelCase : Dict = scheduler
_UpperCAmelCase : Optional[Any] = env
_UpperCAmelCase : Optional[Any] = env.get_dataset()
_UpperCAmelCase : int = {}
for key in self.data.keys():
try:
_UpperCAmelCase : Optional[int] = self.data[key].mean()
except: # noqa: E722
pass
_UpperCAmelCase : Union[str, Any] = {}
for key in self.data.keys():
try:
_UpperCAmelCase : Tuple = self.data[key].std()
except: # noqa: E722
pass
_UpperCAmelCase : Optional[Any] = env.observation_space.shape[0]
_UpperCAmelCase : Tuple = env.action_space.shape[0]
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any] ) ->List[str]:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ) ->int:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : Any ) ->Optional[int]:
'''simple docstring'''
for key, val in cond.items():
_UpperCAmelCase : Optional[int] = val.clone()
return x_in
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = x.shape[0]
_UpperCAmelCase : int = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
_UpperCAmelCase : Optional[int] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_UpperCAmelCase : str = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
_UpperCAmelCase : Any = torch.autograd.grad([y.sum()] , [x] )[0]
_UpperCAmelCase : Optional[int] = self.scheduler._get_variance(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.exp(0.5 * posterior_variance )
_UpperCAmelCase : Tuple = model_std * grad
_UpperCAmelCase : int = 0
_UpperCAmelCase : Tuple = x.detach()
_UpperCAmelCase : Optional[Any] = x + scale * grad
_UpperCAmelCase : List[str] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
_UpperCAmelCase : Optional[int] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
_UpperCAmelCase : Any = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["prev_sample"]
# apply conditions to the trajectory (set the initial state)
_UpperCAmelCase : List[str] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
_UpperCAmelCase : int = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int]=64 , lowerCamelCase__ : List[str]=32 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : List[Any]=0.1 ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.normalize(lowerCamelCase__ , "observations" )
_UpperCAmelCase : Tuple = obs[None].repeat(lowerCamelCase__ , axis=0 )
_UpperCAmelCase : Union[str, Any] = {0: self.to_torch(lowerCamelCase__ )}
_UpperCAmelCase : Tuple = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_UpperCAmelCase : List[str] = randn_tensor(lowerCamelCase__ , device=self.unet.device )
_UpperCAmelCase : int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
_UpperCAmelCase : Union[str, Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
_UpperCAmelCase : List[str] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
_UpperCAmelCase : Any = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
_UpperCAmelCase : Dict = x[sorted_idx]
_UpperCAmelCase : Tuple = sorted_values[:, :, : self.action_dim]
_UpperCAmelCase : List[str] = actions.detach().cpu().numpy()
_UpperCAmelCase : int = self.de_normalize(lowerCamelCase__ , key="actions" )
# select the action with the highest value
if y is not None:
_UpperCAmelCase : str = 0
else:
# if we didn't run value guiding, select a random action
_UpperCAmelCase : int = np.random.randint(0 , lowerCamelCase__ )
_UpperCAmelCase : str = denorm_actions[selected_index, 0]
return denorm_actions
| 363
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
_UpperCAmelCase : Tuple = sum(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_UpperCAmelCase : Any = True
for i in range(1 , s + 1 ):
_UpperCAmelCase : List[Any] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_UpperCAmelCase : Optional[int] = dp[i][j - 1]
if arr[i - 1] <= j:
_UpperCAmelCase : Any = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_UpperCAmelCase : List[Any] = s - 2 * j
break
return diff
| 322
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Optional[int] , lowerCamelCase__ : int = 7_68 , ) ->List[Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
_UpperCAmelCase : Any = nn.Parameter(torch.ones(1 , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[Union[str, torch.device]] = None , lowerCamelCase__ : Optional[torch.dtype] = None , ) ->str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = nn.Parameter(self.mean.to(lowerCamelCase__ ).to(lowerCamelCase__ ) )
_UpperCAmelCase : str = nn.Parameter(self.std.to(lowerCamelCase__ ).to(lowerCamelCase__ ) )
return self
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = (embeds * self.std) + self.mean
return embeds
| 364
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "resnet"
lowerCAmelCase : Union[str, Any] = ["basic", "bottleneck"]
def __init__( self : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Any=64 , lowerCamelCase__ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase__ : int=[3, 4, 6, 3] , lowerCamelCase__ : Dict="bottleneck" , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple , ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : List[str] = embedding_size
_UpperCAmelCase : Tuple = hidden_sizes
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : List[Any] = layer_type
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Tuple = downsample_in_first_stage
_UpperCAmelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[Any] = version.parse("1.11" )
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : str ) ->float:
'''simple docstring'''
return 1E-3
| 322
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = data
_UpperCAmelCase : Node[T] | None = None
def __str__( self : Any ) ->str:
'''simple docstring'''
return F"""{self.data}"""
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) ->None:
'''simple docstring'''
_UpperCAmelCase : Node[T] | None = None
def __iter__( self : List[str] ) ->Iterator[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.top
while node:
yield node.data
_UpperCAmelCase : Dict = node.next
def __str__( self : Dict ) ->str:
'''simple docstring'''
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self : List[Any] ) ->bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if not self.is_empty():
_UpperCAmelCase : Tuple = self.top
_UpperCAmelCase : List[str] = node
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.top
_UpperCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self : List[Any] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 365
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowerCAmelCase (__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : int = [image]
_UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image]
_UpperCAmelCase : Optional[Any] = torch.stack(__lowerCAmelCase )
return image
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
_UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None ) ->str:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" )
_UpperCAmelCase : Union[str, Any] = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase : List[str] = init_latents.shape
_UpperCAmelCase : Optional[int] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
print("add noise to latents at timestep" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowerCamelCase__ )
# 2. Preprocess image
_UpperCAmelCase : Dict = preprocess(lowerCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ )
# 4. Prepare latent variables
_UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ )
_UpperCAmelCase : Any = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase__ ):
# 1. predict noise model_output
_UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample
_UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase__ )
| 322
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
lowerCamelCase__ = {'allegro/herbert-base-cased': 514}
lowerCamelCase__ = {}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : List[Any] = HerbertTokenizer
def __init__( self : Dict , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : Optional[Any]="<unk>" , lowerCamelCase__ : Union[str, Any]="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : str="</s>" , **lowerCamelCase__ : List[str] , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , **lowerCamelCase__ , )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [self.cls_token_id]
_UpperCAmelCase : List[str] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : int = [self.sep_token_id]
_UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 366
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
lowerCamelCase__ = list[list[float | int]]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : float
for row in range(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = matrix[row][col]
_UpperCAmelCase : Optional[int] = vector[row][0]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase : str = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __lowerCAmelCase ):
for row in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__lowerCAmelCase )
]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix = [[0] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
for x_val, y_val in enumerate(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase : int = y_val
_UpperCAmelCase : List[str] = solve(__lowerCAmelCase , __lowerCAmelCase )
def interpolated_func(__lowerCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCAmelCase ) )
return interpolated_func
def __lowerCAmelCase (__lowerCAmelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowerCAmelCase (__lowerCAmelCase = question_function , __lowerCAmelCase = 10 ):
_UpperCAmelCase : list[int] = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Callable[[int], int]
_UpperCAmelCase : int
for poly in polynomials:
_UpperCAmelCase : int = 1
while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ):
x_val += 1
ret += poly(__lowerCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : float ) ->float:
'''simple docstring'''
return 0.0
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Tuple = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_UpperCAmelCase : str = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = 512
_UpperCAmelCase : str = [1] + [0] * (size - 1)
_UpperCAmelCase : int = [filter_type.process(__lowerCAmelCase ) for item in inputs]
_UpperCAmelCase : int = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCAmelCase : Any = np.abs(np.fft.fft(__lowerCAmelCase ) )
_UpperCAmelCase : Any = 20 * np.logaa(__lowerCAmelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_UpperCAmelCase : Tuple = get_bounds(__lowerCAmelCase , __lowerCAmelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(__lowerCAmelCase )
plt.show()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = 512
_UpperCAmelCase : Union[str, Any] = [1] + [0] * (size - 1)
_UpperCAmelCase : Union[str, Any] = [filter_type.process(__lowerCAmelCase ) for item in inputs]
_UpperCAmelCase : Tuple = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCAmelCase : List[Any] = np.angle(np.fft.fft(__lowerCAmelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(__lowerCAmelCase , -2 * pi ) )
plt.show()
| 367
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322
| 0
|
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
lowerCamelCase__ = 299_792_458
# Symbols
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = symbols('ct x y z')
def __lowerCAmelCase (__lowerCAmelCase ):
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def __lowerCAmelCase (__lowerCAmelCase ):
return 1 / sqrt(1 - beta(__lowerCAmelCase ) ** 2 )
def __lowerCAmelCase (__lowerCAmelCase ):
return np.array(
[
[gamma(__lowerCAmelCase ), -gamma(__lowerCAmelCase ) * beta(__lowerCAmelCase ), 0, 0],
[-gamma(__lowerCAmelCase ) * beta(__lowerCAmelCase ), gamma(__lowerCAmelCase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = None ):
# Ensure event is not empty
if event is None:
_UpperCAmelCase : Union[str, Any] = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__lowerCAmelCase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowerCamelCase__ = transform(29_979_245)
print('Example of four vector: ')
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
lowerCamelCase__ = {ct: c, x: 1, y: 1, z: 1}
lowerCamelCase__ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
| 368
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = data
_UpperCAmelCase : Node[T] | None = None
def __str__( self : Any ) ->str:
'''simple docstring'''
return F"""{self.data}"""
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) ->None:
'''simple docstring'''
_UpperCAmelCase : Node[T] | None = None
def __iter__( self : List[str] ) ->Iterator[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.top
while node:
yield node.data
_UpperCAmelCase : Dict = node.next
def __str__( self : Dict ) ->str:
'''simple docstring'''
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self : List[Any] ) ->bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if not self.is_empty():
_UpperCAmelCase : Tuple = self.top
_UpperCAmelCase : List[str] = node
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.top
_UpperCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self : List[Any] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 322
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Any=4 , lowerCamelCase__ : int=2 , lowerCamelCase__ : int=7 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=36 , lowerCamelCase__ : Any=2 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Any=5_12 , lowerCamelCase__ : List[Any]=16 , lowerCamelCase__ : int=2 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Dict=6 , lowerCamelCase__ : Union[str, Any]=6 , lowerCamelCase__ : Any=3 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : int=None , lowerCamelCase__ : Dict=10_00 , ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : Union[str, Any] = num_channels
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : List[str] = patch_size
_UpperCAmelCase : str = is_training
_UpperCAmelCase : Union[str, Any] = use_input_mask
_UpperCAmelCase : str = use_token_type_ids
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : Any = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : Dict = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : Any = coordinate_size
_UpperCAmelCase : Optional[int] = shape_size
_UpperCAmelCase : int = num_labels
_UpperCAmelCase : Tuple = num_choices
_UpperCAmelCase : str = scope
_UpperCAmelCase : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase : Tuple = text_seq_length
_UpperCAmelCase : List[Any] = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase : int = self.text_seq_length + self.image_seq_length
def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_UpperCAmelCase : List[Any] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase : List[Any] = bbox[i, j, 3]
_UpperCAmelCase : Any = bbox[i, j, 1]
_UpperCAmelCase : Optional[int] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase : Union[str, Any] = bbox[i, j, 2]
_UpperCAmelCase : str = bbox[i, j, 0]
_UpperCAmelCase : List[str] = tmp_coordinate
_UpperCAmelCase : Union[str, Any] = tf.constant(lowerCamelCase__ )
_UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : str = None
if self.use_input_mask:
_UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase : List[str] = None
if self.use_token_type_ids:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase : Any = None
_UpperCAmelCase : Tuple = None
if self.use_labels:
_UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase : List[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
_UpperCAmelCase : str = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase : Optional[Any] = model({"pixel_values": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Any = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : Tuple = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
_UpperCAmelCase : Dict = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : Union[str, Any] = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
_UpperCAmelCase : Dict = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
(_UpperCAmelCase) : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[str] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase : List[str] = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase : Tuple = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Optional[Any] = False
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int ) ->Tuple:
'''simple docstring'''
return True
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict=False ) ->dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : List[str] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
_UpperCAmelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = TFLayoutLMvaModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , "hf_compute_loss" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
_UpperCAmelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
_UpperCAmelCase : str = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_UpperCAmelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = prepared_for_class.pop("input_ids" )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_UpperCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
_UpperCAmelCase : List[str] = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_UpperCAmelCase : Union[str, Any] = -1_00
_UpperCAmelCase : List[str] = tf.convert_to_tensor(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_UpperCAmelCase : int = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : Any = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_UpperCAmelCase : Any = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
_UpperCAmelCase : Dict = prepared_for_class.keys() - inputs_dict.keys()
_UpperCAmelCase : List[str] = inspect.signature(model.call ).parameters
_UpperCAmelCase : Optional[Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_UpperCAmelCase : Optional[Any] = {0: "input_ids"}
for label_key in label_keys:
_UpperCAmelCase : Any = signature_names.index(lowerCamelCase__ )
_UpperCAmelCase : Dict = label_key
_UpperCAmelCase : Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_UpperCAmelCase : int = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_UpperCAmelCase : Union[str, Any] = prepared_for_class[value]
_UpperCAmelCase : str = tuple(lowerCamelCase__ )
# Send to model
_UpperCAmelCase : Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
(
_UpperCAmelCase
) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
(
_UpperCAmelCase
) : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase : Any = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
(
_UpperCAmelCase
) : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->Any:
'''simple docstring'''
(
_UpperCAmelCase
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any ) ->Tuple:
'''simple docstring'''
(
_UpperCAmelCase
) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Any = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ():
_UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Optional[int] = prepare_img()
_UpperCAmelCase : int = image_processor(images=lowerCamelCase__ , return_tensors="tf" ).pixel_values
_UpperCAmelCase : Optional[int] = tf.constant([[1, 2]] )
_UpperCAmelCase : List[str] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
_UpperCAmelCase : List[Any] = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
_UpperCAmelCase : Dict = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
| 369
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : int = "speech_to_text_2"
lowerCAmelCase : str = ["past_key_values"]
lowerCAmelCase : int = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[Any] , lowerCamelCase__ : Tuple=1_00_00 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Tuple=20_48 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : Dict=2_56 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Any=1 , lowerCamelCase__ : int=0 , lowerCamelCase__ : str=2 , lowerCamelCase__ : List[Any]=10_24 , **lowerCamelCase__ : str , ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Optional[int] = d_model
_UpperCAmelCase : List[Any] = decoder_ffn_dim
_UpperCAmelCase : Any = decoder_layers
_UpperCAmelCase : int = decoder_attention_heads
_UpperCAmelCase : Any = dropout
_UpperCAmelCase : List[Any] = attention_dropout
_UpperCAmelCase : Optional[int] = activation_dropout
_UpperCAmelCase : List[Any] = activation_function
_UpperCAmelCase : int = init_std
_UpperCAmelCase : Dict = decoder_layerdrop
_UpperCAmelCase : str = use_cache
_UpperCAmelCase : Union[str, Any] = decoder_layers
_UpperCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase : Any = max_target_positions
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
| 322
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['MaskFormerFeatureExtractor']
lowerCamelCase__ = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
lowerCamelCase__ = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 370
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=" " ):
_UpperCAmelCase : Any = text.split(__lowerCAmelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : Dict = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(__lowerCAmelCase ):
titles.append(title if title is not None else "" )
texts.append(__lowerCAmelCase )
return {"title": titles, "text": texts}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : str = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=__lowerCAmelCase , padding="longest" , return_tensors="pt" )["input_ids"]
_UpperCAmelCase : str = ctx_encoder(input_ids.to(device=__lowerCAmelCase ) , return_dict=__lowerCAmelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_UpperCAmelCase : Optional[int] = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_UpperCAmelCase : Optional[int] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
_UpperCAmelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_UpperCAmelCase : Dict = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
_UpperCAmelCase : int = dataset.map(
partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , )
# And finally save your dataset
_UpperCAmelCase : List[Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(__lowerCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_UpperCAmelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=__lowerCAmelCase )
# And save the index
_UpperCAmelCase : List[str] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(__lowerCAmelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
lowerCAmelCase : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
lowerCAmelCase : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
lowerCAmelCase : Optional[str] = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
lowerCAmelCase : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
lowerCAmelCase : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 322
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __lowerCAmelCase (__lowerCAmelCase ):
return (data["data"], data["target"])
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = XGBClassifier()
classifier.fit(__lowerCAmelCase , __lowerCAmelCase )
return classifier
def __lowerCAmelCase ():
_UpperCAmelCase : Optional[Any] = load_iris()
_UpperCAmelCase : Union[str, Any] = data_handling(__lowerCAmelCase )
_UpperCAmelCase : Any = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.2_5 )
_UpperCAmelCase : List[Any] = iris["target_names"]
# Create an XGBoost Classifier from the training data
_UpperCAmelCase : List[Any] = xgboost(__lowerCAmelCase , __lowerCAmelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , display_labels=__lowerCAmelCase , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 371
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase__ = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
@classmethod
def lowerCAmelCase__ ( cls : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id="test-config" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="valid_org/test-config-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
CustomConfig.register_for_auto_class()
_UpperCAmelCase : int = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_UpperCAmelCase : str = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_UpperCAmelCase : Any = c.n_embd + 1 # int
_UpperCAmelCase : List[Any] = c.resid_pdrop + 1.0 # float
_UpperCAmelCase : Tuple = not c.scale_attn_weights # bool
_UpperCAmelCase : List[Any] = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(lowerCamelCase__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(lowerCamelCase__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(lowerCamelCase__ , c.summary_type , "mismatch for key: summary_type" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = PretrainedConfig()
_UpperCAmelCase : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_UpperCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(lowerCamelCase__ )}.""" )
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = mock.Mock()
_UpperCAmelCase : List[str] = 5_00
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : Tuple = HTTPError
_UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head:
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = AutoConfig.from_pretrained("bert-base-cased" )
_UpperCAmelCase : str = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_UpperCAmelCase : Dict = ["config.42.0.0.json"]
_UpperCAmelCase : Union[str, Any] = 7_68
configuration.save_pretrained(lowerCamelCase__ )
shutil.move(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , os.path.join(lowerCamelCase__ , "config.42.0.0.json" ) )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_UpperCAmelCase : Any = "v4.0.0"
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_UpperCAmelCase : List[Any] = "v3.0.0"
_UpperCAmelCase : int = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 322
| 0
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def SCREAMING_SNAKE_CASE__ ( ) -> tuple[list[int], int]:
'''simple docstring'''
lowerCAmelCase : List[str] = [randint(-1_000, 1_000 ) for i in range(10 )]
lowerCAmelCase : Optional[int] = randint(-5_000, 5_000 )
return (arr, r)
__A : Union[str, Any] = make_dataset()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> tuple[int, ...]:
'''simple docstring'''
for triplet in permutations(_UpperCAmelCase, 3 ):
if sum(_UpperCAmelCase ) == target:
return tuple(sorted(_UpperCAmelCase ) )
return (0, 0, 0)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> tuple[int, int, int]:
'''simple docstring'''
arr.sort()
lowerCAmelCase : Dict = len(_UpperCAmelCase )
for i in range(n - 1 ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def SCREAMING_SNAKE_CASE__ ( ) -> tuple[float, float]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
lowerCAmelCase : List[str] = '\ntriplet_sum1(*dataset)\n'
lowerCAmelCase : str = '\ntriplet_sum2(*dataset)\n'
lowerCAmelCase : Optional[int] = repeat(setup=_UpperCAmelCase, stmt=_UpperCAmelCase, repeat=5, number=10_000 )
lowerCAmelCase : Optional[Any] = repeat(setup=_UpperCAmelCase, stmt=_UpperCAmelCase, repeat=5, number=10_000 )
return (min(_UpperCAmelCase ), min(_UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__A : Dict = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 323
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase_ : Optional[List[bool]]
lowerCAmelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 1
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Tuple = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "align_text_model"
def __init__( self : Dict , UpperCAmelCase_ : int=30522 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Any=3072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Optional[Any]="absolute" , UpperCAmelCase_ : Dict=True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Tuple = type_vocab_size
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Tuple = use_cache
lowerCAmelCase : int = pad_token_id
@classmethod
def lowercase__ ( cls : str , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Optional[Any] ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase : Any = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
lowerCAmelCase : List[str] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = "align_vision_model"
def __init__( self : Optional[int] , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 600 , UpperCAmelCase_ : float = 2.0 , UpperCAmelCase_ : float = 3.1 , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase_ : List[int] = [] , UpperCAmelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase_ : float = 0.25 , UpperCAmelCase_ : str = "swish" , UpperCAmelCase_ : int = 2560 , UpperCAmelCase_ : str = "mean" , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 0.0_01 , UpperCAmelCase_ : float = 0.99 , UpperCAmelCase_ : float = 0.2 , **UpperCAmelCase_ : Optional[int] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : str = num_channels
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : Any = width_coefficient
lowerCAmelCase : Optional[Any] = depth_coefficient
lowerCAmelCase : List[str] = depth_divisor
lowerCAmelCase : Tuple = kernel_sizes
lowerCAmelCase : Any = in_channels
lowerCAmelCase : List[Any] = out_channels
lowerCAmelCase : Union[str, Any] = depthwise_padding
lowerCAmelCase : List[Any] = strides
lowerCAmelCase : Tuple = num_block_repeats
lowerCAmelCase : Optional[Any] = expand_ratios
lowerCAmelCase : int = squeeze_expansion_ratio
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : Tuple = hidden_dim
lowerCAmelCase : Tuple = pooling_type
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Union[str, Any] = batch_norm_eps
lowerCAmelCase : List[Any] = batch_norm_momentum
lowerCAmelCase : int = drop_connect_rate
lowerCAmelCase : Union[str, Any] = sum(UpperCAmelCase_ ) * 4
@classmethod
def lowercase__ ( cls : int , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Any ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase : Any = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
lowerCAmelCase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = "align"
lowerCAmelCase_ : str = True
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=640 , UpperCAmelCase_ : Optional[int]=1.0 , UpperCAmelCase_ : Optional[int]=0.02 , **UpperCAmelCase_ : str , ):
super().__init__(**UpperCAmelCase_ )
if text_config is None:
lowerCAmelCase : Tuple = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
lowerCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
lowerCAmelCase : int = AlignTextConfig(**UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = AlignVisionConfig(**UpperCAmelCase_ )
lowerCAmelCase : Tuple = projection_dim
lowerCAmelCase : Optional[Any] = temperature_init_value
lowerCAmelCase : int = initializer_range
@classmethod
def lowercase__ ( cls : List[Any] , UpperCAmelCase_ : AlignTextConfig , UpperCAmelCase_ : AlignVisionConfig , **UpperCAmelCase_ : Dict ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase : Tuple = self.text_config.to_dict()
lowerCAmelCase : Union[str, Any] = self.vision_config.to_dict()
lowerCAmelCase : Any = self.__class__.model_type
return output
| 323
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return x + 2
class __A ( unittest.TestCase ):
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = 'x = 3'
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
lowerCAmelCase : Dict = 'x = y'
lowerCAmelCase : List[Any] = {'y': 5}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 'y = add_two(x)'
lowerCAmelCase : int = {'x': 3}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 'x = 3'
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5'
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\''
lowerCAmelCase : str = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} )
lowerCAmelCase : Any = {'x': 8}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = 'test_list = [x, add_two(x)]'
lowerCAmelCase : Optional[Any] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [3, 5] )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : int = 'y = x'
lowerCAmelCase : Optional[int] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowerCAmelCase : List[str] = {'x': 3}
lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowerCAmelCase : List[Any] = {'x': 3}
lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i'
lowerCAmelCase : str = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ )
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
| 323
| 1
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> tuple[int, int]:
'''simple docstring'''
if b == 0:
return (1, 0)
((lowerCAmelCase) , (lowerCAmelCase)) : Union[str, Any] = extended_euclid(_UpperCAmelCase, a % b )
lowerCAmelCase : Optional[int] = a // b
return (y, x - k * y)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
((lowerCAmelCase) , (lowerCAmelCase)) : Tuple = extended_euclid(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : Tuple = na * na
lowerCAmelCase : Optional[int] = ra * x * na + ra * y * na
return (n % m + m) % m
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
((lowerCAmelCase) , (lowerCAmelCase)) : int = extended_euclid(_UpperCAmelCase, _UpperCAmelCase )
if b < 0:
lowerCAmelCase : Union[str, Any] = (b % n + n) % n
return b
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : int = invert_modulo(_UpperCAmelCase, _UpperCAmelCase ), invert_modulo(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : int = na * na
lowerCAmelCase : Dict = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 323
|
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCAmelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_UpperCAmelCase, 2 ) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCAmelCase : Optional[Any] = (sidea + sidea + sidea) / 2
lowerCAmelCase : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
print(F'Reqular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A : Optional[Any] = logging.get_logger(__name__)
__A : Optional[int] = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
lowerCAmelCase : Dict = k.replace(_UpperCAmelCase, _UpperCAmelCase )
if k.startswith('encoder' ):
lowerCAmelCase : List[str] = k.replace('.attn', '.self_attn' )
lowerCAmelCase : str = k.replace('norm1', 'self_attn_layer_norm' )
lowerCAmelCase : Union[str, Any] = k.replace('norm2', 'final_layer_norm' )
elif k.startswith('decoder' ):
lowerCAmelCase : Any = k.replace('norm1', 'self_attn_layer_norm' )
lowerCAmelCase : Optional[int] = k.replace('norm2', 'encoder_attn_layer_norm' )
lowerCAmelCase : Any = k.replace('norm3', 'final_layer_norm' )
return k
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
lowerCAmelCase : int = sd.pop(_UpperCAmelCase )
lowerCAmelCase : int = k.replace('layernorm_embedding', 'layer_norm' )
assert new_k not in sd
lowerCAmelCase : Optional[int] = v
__A : Optional[int] = ['''START''']
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Tuple = torch.load(_UpperCAmelCase, map_location='cpu' )
lowerCAmelCase : Optional[Any] = model['model']
lowerCAmelCase : Optional[Any] = BlenderbotConfig.from_json_file(_UpperCAmelCase )
lowerCAmelCase : List[str] = BlenderbotForConditionalGeneration(_UpperCAmelCase )
lowerCAmelCase : str = m.model.state_dict().keys()
lowerCAmelCase : int = []
lowerCAmelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
lowerCAmelCase : int = rename_state_dict_key(_UpperCAmelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
lowerCAmelCase : List[str] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_UpperCAmelCase )
m.model.load_state_dict(_UpperCAmelCase, strict=_UpperCAmelCase )
m.half()
m.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
__A : Dict = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 323
|
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[Any] = ''
else:
lowerCAmelCase : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : List[str] = dct.pop(_UpperCAmelCase )
lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : str = ViTMSNConfig()
lowerCAmelCase : str = 1_000
lowerCAmelCase : List[str] = 'datasets/huggingface/label-files'
lowerCAmelCase : int = 'imagenet-1k-id2label.json'
lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase ), 'r' ) )
lowerCAmelCase : Optional[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[Any] = 384
lowerCAmelCase : List[Any] = 1_536
lowerCAmelCase : Union[str, Any] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase : List[Any] = 1_024
lowerCAmelCase : Any = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : Any = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase : Any = 4
elif "l7" in checkpoint_url:
lowerCAmelCase : int = 7
lowerCAmelCase : str = 1_024
lowerCAmelCase : Tuple = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Tuple = 16
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : List[str] = ViTMSNModel(_UpperCAmelCase )
lowerCAmelCase : int = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu' )['target_encoder']
lowerCAmelCase : int = ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCAmelCase )
lowerCAmelCase : Tuple = create_rename_keys(_UpperCAmelCase, base_model=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase, base_model=_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase : Dict = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
lowerCAmelCase : Any = ViTImageProcessor(
size=config.image_size, image_mean=_UpperCAmelCase, image_std=_UpperCAmelCase )
lowerCAmelCase : List[Any] = image_processor(images=_UpperCAmelCase, return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> np.array:
'''simple docstring'''
lowerCAmelCase : Tuple = f"{sampling_rate}"
lowerCAmelCase : Union[str, Any] = '1'
lowerCAmelCase : Tuple = 'f32le'
lowerCAmelCase : Optional[Any] = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(_UpperCAmelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCAmelCase : Optional[int] = ffmpeg_process.communicate(_UpperCAmelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
lowerCAmelCase : str = output_stream[0]
lowerCAmelCase : List[str] = np.frombuffer(_UpperCAmelCase, np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = "f32le", ) -> str:
'''simple docstring'''
lowerCAmelCase : List[str] = f"{sampling_rate}"
lowerCAmelCase : Union[str, Any] = '1'
if format_for_conversion == "s16le":
lowerCAmelCase : Optional[int] = 2
elif format_for_conversion == "f32le":
lowerCAmelCase : Tuple = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
lowerCAmelCase : Optional[int] = platform.system()
if system == "Linux":
lowerCAmelCase : Union[str, Any] = 'alsa'
lowerCAmelCase : Optional[Any] = 'default'
elif system == "Darwin":
lowerCAmelCase : Optional[int] = 'avfoundation'
lowerCAmelCase : str = ':0'
elif system == "Windows":
lowerCAmelCase : Optional[Any] = 'dshow'
lowerCAmelCase : List[Any] = 'default'
lowerCAmelCase : Optional[Any] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowerCAmelCase : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCAmelCase : Any = _ffmpeg_stream(_UpperCAmelCase, _UpperCAmelCase )
for item in iterator:
yield item
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = "f32le", ) -> Dict:
'''simple docstring'''
if stream_chunk_s is not None:
lowerCAmelCase : Union[str, Any] = stream_chunk_s
else:
lowerCAmelCase : Tuple = chunk_length_s
lowerCAmelCase : Union[str, Any] = ffmpeg_microphone(_UpperCAmelCase, _UpperCAmelCase, format_for_conversion=_UpperCAmelCase )
if format_for_conversion == "s16le":
lowerCAmelCase : Any = np.intaa
lowerCAmelCase : str = 2
elif format_for_conversion == "f32le":
lowerCAmelCase : Tuple = np.floataa
lowerCAmelCase : int = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
lowerCAmelCase : int = chunk_length_s / 6
lowerCAmelCase : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_UpperCAmelCase, (int, float) ):
lowerCAmelCase : List[str] = [stride_length_s, stride_length_s]
lowerCAmelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCAmelCase : Tuple = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCAmelCase : List[Any] = datetime.datetime.now()
lowerCAmelCase : Optional[Any] = datetime.timedelta(seconds=_UpperCAmelCase )
for item in chunk_bytes_iter(_UpperCAmelCase, _UpperCAmelCase, stride=(stride_left, stride_right), stream=_UpperCAmelCase ):
# Put everything back in numpy scale
lowerCAmelCase : Optional[int] = np.frombuffer(item['raw'], dtype=_UpperCAmelCase )
lowerCAmelCase : List[Any] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
lowerCAmelCase : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Any = b''
lowerCAmelCase , lowerCAmelCase : Any = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
lowerCAmelCase : Optional[Any] = 0
for raw in iterator:
acc += raw
if stream and len(_UpperCAmelCase ) < chunk_len:
lowerCAmelCase : Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_UpperCAmelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCAmelCase : Optional[int] = (_stride_left, stride_right)
lowerCAmelCase : int = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
lowerCAmelCase : Any = False
yield item
lowerCAmelCase : Optional[int] = stride_left
lowerCAmelCase : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_UpperCAmelCase ) > stride_left:
lowerCAmelCase : Any = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
lowerCAmelCase : Dict = False
yield item
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Tuple = 2**24 # 16Mo
try:
with subprocess.Popen(_UpperCAmelCase, stdout=subprocess.PIPE, bufsize=_UpperCAmelCase ) as ffmpeg_process:
while True:
lowerCAmelCase : int = ffmpeg_process.stdout.read(_UpperCAmelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 323
|
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 100 ) -> int:
'''simple docstring'''
lowerCAmelCase : Any = sum(i * i for i in range(1, n + 1 ) )
lowerCAmelCase : str = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
| 323
| 1
|
from collections import defaultdict
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 1_000_000, _UpperCAmelCase = 10 ) -> int:
'''simple docstring'''
lowerCAmelCase : defaultdict = defaultdict(_UpperCAmelCase )
for outer_width in range(3, (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
lowerCAmelCase : Dict = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ), 1 )
else:
lowerCAmelCase : Optional[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(_UpperCAmelCase, outer_width - 1, 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F'{solution() = }')
| 323
|
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 0.0
for coeff in reversed(_UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = result * x + coeff
return result
if __name__ == "__main__":
__A : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
__A : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 323
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__A : Tuple = logging.getLogger(__name__)
@dataclass
class __A :
lowerCAmelCase_ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCAmelCase_ : bool = field(
default=lowerCAmelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCAmelCase_ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase_ : bool = field(
default=lowerCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __A :
lowerCAmelCase_ : Optional[str] = field(default=lowerCAmelCase , metadata={"help": "The input training data file (a text file)."} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCAmelCase_ : bool = field(
default=lowerCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCAmelCase_ : bool = field(
default=lowerCAmelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase__ ( self : List[str] ):
if self.train_file is not None:
lowerCAmelCase : int = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCAmelCase : Union[str, Any] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __A :
lowerCAmelCase_ : PreTrainedTokenizerBase
lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = True
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[int] = None
def __call__( self : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Union[str, Any] = 'label' if 'label' in features[0].keys() else 'labels'
lowerCAmelCase : str = [feature.pop(UpperCAmelCase_ ) for feature in features]
lowerCAmelCase : int = len(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = len(features[0]['input_ids'] )
lowerCAmelCase : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase_ )] for feature in features
]
lowerCAmelCase : Tuple = list(chain(*UpperCAmelCase_ ) )
lowerCAmelCase : Tuple = self.tokenizer.pad(
UpperCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
lowerCAmelCase : Any = {k: v.view(UpperCAmelCase_ , UpperCAmelCase_ , -1 ) for k, v in batch.items()}
# Add back labels
lowerCAmelCase : str = torch.tensor(UpperCAmelCase_ , dtype=torch.intaa )
return batch
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag', _UpperCAmelCase, _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCAmelCase : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCAmelCase : Union[str, Any] = {}
if data_args.train_file is not None:
lowerCAmelCase : Dict = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase : List[str] = data_args.validation_file
lowerCAmelCase : Any = data_args.train_file.split('.' )[-1]
lowerCAmelCase : List[str] = load_dataset(
_UpperCAmelCase, data_files=_UpperCAmelCase, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
lowerCAmelCase : List[str] = load_dataset(
'swag', 'regular', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
lowerCAmelCase : str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=_UpperCAmelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCAmelCase : str = [f"ending{i}" for i in range(4 )]
lowerCAmelCase : List[Any] = 'sent1'
lowerCAmelCase : Optional[int] = 'sent2'
if data_args.max_seq_length is None:
lowerCAmelCase : List[str] = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
lowerCAmelCase : List[Any] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCAmelCase : Optional[Any] = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_UpperCAmelCase ):
lowerCAmelCase : str = [[context] * 4 for context in examples[context_name]]
lowerCAmelCase : Optional[int] = examples[question_header_name]
lowerCAmelCase : Optional[Any] = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(_UpperCAmelCase )
]
# Flatten out
lowerCAmelCase : List[Any] = list(chain(*_UpperCAmelCase ) )
lowerCAmelCase : str = list(chain(*_UpperCAmelCase ) )
# Tokenize
lowerCAmelCase : int = tokenizer(
_UpperCAmelCase, _UpperCAmelCase, truncation=_UpperCAmelCase, max_length=_UpperCAmelCase, padding='max_length' if data_args.pad_to_max_length else False, )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(_UpperCAmelCase ), 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
lowerCAmelCase : List[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
lowerCAmelCase : Tuple = min(len(_UpperCAmelCase ), data_args.max_train_samples )
lowerCAmelCase : Dict = train_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCAmelCase : Union[str, Any] = train_dataset.map(
_UpperCAmelCase, batched=_UpperCAmelCase, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
lowerCAmelCase : Dict = raw_datasets['validation']
if data_args.max_eval_samples is not None:
lowerCAmelCase : Optional[int] = min(len(_UpperCAmelCase ), data_args.max_eval_samples )
lowerCAmelCase : Tuple = eval_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCAmelCase : str = eval_dataset.map(
_UpperCAmelCase, batched=_UpperCAmelCase, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
lowerCAmelCase : Tuple = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_UpperCAmelCase, pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_UpperCAmelCase ):
lowerCAmelCase , lowerCAmelCase : int = eval_predictions
lowerCAmelCase : Optional[int] = np.argmax(_UpperCAmelCase, axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCAmelCase : Union[str, Any] = Trainer(
model=_UpperCAmelCase, args=_UpperCAmelCase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=_UpperCAmelCase, data_collator=_UpperCAmelCase, compute_metrics=_UpperCAmelCase, )
# Training
if training_args.do_train:
lowerCAmelCase : Dict = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase : Optional[Any] = last_checkpoint
lowerCAmelCase : Optional[Any] = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase : int = train_result.metrics
lowerCAmelCase : Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
lowerCAmelCase : Tuple = min(_UpperCAmelCase, len(_UpperCAmelCase ) )
trainer.log_metrics('train', _UpperCAmelCase )
trainer.save_metrics('train', _UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase : int = trainer.evaluate()
lowerCAmelCase : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase )
lowerCAmelCase : Any = min(_UpperCAmelCase, len(_UpperCAmelCase ) )
trainer.log_metrics('eval', _UpperCAmelCase )
trainer.save_metrics('eval', _UpperCAmelCase )
lowerCAmelCase : Any = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 323
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=18 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , ):
lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase : str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : Any = max_resolution
lowerCAmelCase : int = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : str = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
def lowercase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323
| 1
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A ( unittest.TestCase ):
def lowercase__ ( self : Any , UpperCAmelCase_ : Optional[int] ):
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 250
lowerCAmelCase : Dict = ids_tensor((batch_size, length) , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = torch.ones((batch_size, length) , device=UpperCAmelCase_ , dtype=torch.float ) / length
return input_ids, scores
def lowercase__ ( self : Tuple ):
lowerCAmelCase , lowerCAmelCase : Any = self._get_tensors(5 )
lowerCAmelCase : Any = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase , lowerCAmelCase : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase , lowerCAmelCase : Tuple = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=10 )
lowerCAmelCase , lowerCAmelCase : Any = self._get_tensors(5 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase , lowerCAmelCase : Dict = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase , lowerCAmelCase : List[str] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase , lowerCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase , lowerCAmelCase : Dict = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Dict = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase , lowerCAmelCase : Any = self._get_tensors(5 )
lowerCAmelCase : int = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowercase__ ( self : Optional[int] ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCAmelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCAmelCase_ ) , 1 )
| 323
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[Any] = ''
else:
lowerCAmelCase : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : List[str] = dct.pop(_UpperCAmelCase )
lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : str = ViTMSNConfig()
lowerCAmelCase : str = 1_000
lowerCAmelCase : List[str] = 'datasets/huggingface/label-files'
lowerCAmelCase : int = 'imagenet-1k-id2label.json'
lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase ), 'r' ) )
lowerCAmelCase : Optional[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[Any] = 384
lowerCAmelCase : List[Any] = 1_536
lowerCAmelCase : Union[str, Any] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase : List[Any] = 1_024
lowerCAmelCase : Any = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : Any = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase : Any = 4
elif "l7" in checkpoint_url:
lowerCAmelCase : int = 7
lowerCAmelCase : str = 1_024
lowerCAmelCase : Tuple = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Tuple = 16
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : List[str] = ViTMSNModel(_UpperCAmelCase )
lowerCAmelCase : int = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu' )['target_encoder']
lowerCAmelCase : int = ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCAmelCase )
lowerCAmelCase : Tuple = create_rename_keys(_UpperCAmelCase, base_model=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase, base_model=_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase : Dict = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
lowerCAmelCase : Any = ViTImageProcessor(
size=config.image_size, image_mean=_UpperCAmelCase, image_std=_UpperCAmelCase )
lowerCAmelCase : List[Any] = image_processor(images=_UpperCAmelCase, return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : List[Any] = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError('String lengths must match!' )
lowerCAmelCase : Tuple = 0
for chara, chara in zip(_UpperCAmelCase, _UpperCAmelCase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase : Dict = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase : List[str] = 'A painting of a squirrel eating a burger'
lowerCAmelCase : List[Any] = torch.manual_seed(0 )
lowerCAmelCase : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
lowerCAmelCase : Any = output.images
lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : Any = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase : List[Any] = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase : int = 'A painting of a squirrel eating a burger'
lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase : Tuple = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
lowerCAmelCase : str = output.images
lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : Tuple = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase : str = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase : List[str] = 'A painting of a squirrel eating a burger'
lowerCAmelCase : Tuple = torch.manual_seed(0 )
lowerCAmelCase : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=UpperCAmelCase_ , )
lowerCAmelCase : Tuple = output.images
lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : Tuple = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 323
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A : List[Any] = trt.Logger(trt.Logger.WARNING)
__A : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A : List[Any] = logging.getLogger(__name__)
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__A : List[str] = parser.parse_args()
if args.tokenizer_name:
__A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__A : List[Any] = args.per_device_eval_batch_size
__A : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A : Any = True
__A : Union[str, Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__A : List[str] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__A : Dict = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A : str = [network.get_input(i) for i in range(network.num_inputs)]
__A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__A : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.intaa )
lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.intaa )
lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase )
# start time
lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase : List[str] = time.time()
lowerCAmelCase : Tuple = end_time - start_time
lowerCAmelCase : Union[str, Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__A : int = raw_datasets['''validation'''].column_names
__A : int = '''question''' if '''question''' in column_names else column_names[0]
__A : List[str] = '''context''' if '''context''' in column_names else column_names[1]
__A : int = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A : str = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
__A : int = raw_datasets['''validation''']
# Validation Feature Creation
__A : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__A : List[str] = default_data_collator
__A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__A : Union[str, Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
__A , __A : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__A , __A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 323
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = "dinat"
lowerCAmelCase_ : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : Dict=[2, 4, 8, 16] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase_ : int=3.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : str = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = len(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = num_heads
lowerCAmelCase : Tuple = kernel_size
lowerCAmelCase : List[str] = dilations
lowerCAmelCase : Any = mlp_ratio
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase : int = layer_scale_init_value
lowerCAmelCase : Optional[Any] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 323
| 1
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__A : Union[str, Any] = '''bart'''
__A : Optional[int] = True
@st.cache(allow_output_mutation=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
'''simple docstring'''
if LOAD_DENSE_INDEX:
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
lowerCAmelCase : List[str] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
lowerCAmelCase : Union[str, Any] = qar_model.eval()
else:
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = (None, None)
if MODEL_TYPE == "bart":
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
lowerCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
lowerCAmelCase : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
lowerCAmelCase : List[Any] = sas_model.eval()
else:
lowerCAmelCase , lowerCAmelCase : str = make_qa_sas_model(
model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
lowerCAmelCase : Optional[Any] = faiss.StandardGpuResources()
lowerCAmelCase : List[Any] = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0' )['train']
lowerCAmelCase : Optional[int] = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wikiaab_passages.num_rows, 128), )
lowerCAmelCase : List[Any] = faiss.IndexFlatIP(128 )
lowerCAmelCase : Tuple = faiss.index_cpu_to_gpu(_UpperCAmelCase, 1, _UpperCAmelCase )
wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU
else:
lowerCAmelCase , lowerCAmelCase : Dict = (None, None)
lowerCAmelCase : Any = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = datasets.load_dataset('eli5', name='LFQA_reddit' )
lowerCAmelCase : Optional[Any] = elia['train_eli5']
lowerCAmelCase : Optional[int] = np.memmap(
'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(elia_train.num_rows, 128) )
lowerCAmelCase : Optional[int] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_UpperCAmelCase )
return (elia_train, eli5_train_q_index)
__A , __A , __A : Any = load_indexes()
__A , __A , __A , __A : Optional[int] = load_models()
__A , __A : List[Any] = load_train_data()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=10 ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : List[str] = embed_questions_for_retrieval([question], _UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase , lowerCAmelCase : List[Any] = eli5_train_q_index.search(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : Tuple = [elia_train[int(_UpperCAmelCase )] for i in I[0]]
return nn_examples
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase="wiki40b", _UpperCAmelCase="dense", _UpperCAmelCase=10 ) -> str:
'''simple docstring'''
if source == "none":
lowerCAmelCase , lowerCAmelCase : str = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCAmelCase , lowerCAmelCase : Optional[int] = query_qa_dense_index(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
else:
lowerCAmelCase , lowerCAmelCase : str = query_es_index(
_UpperCAmelCase, _UpperCAmelCase, index_name='english_wiki40b_snippets_100w', n_results=_UpperCAmelCase, )
lowerCAmelCase : Union[str, Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
lowerCAmelCase : Dict = 'question: {} context: {}'.format(_UpperCAmelCase, _UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None),
} )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=64, _UpperCAmelCase=256, _UpperCAmelCase=False, _UpperCAmelCase=2, _UpperCAmelCase=0.9_5, _UpperCAmelCase=0.8 ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowerCAmelCase : Tuple = qa_sas_generate(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, num_answers=1, num_beams=_UpperCAmelCase, min_len=_UpperCAmelCase, max_len=_UpperCAmelCase, do_sample=_UpperCAmelCase, temp=_UpperCAmelCase, top_p=_UpperCAmelCase, top_k=_UpperCAmelCase, max_input_length=1_024, device='cuda:0', )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__A : int = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__A : str = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__A : int = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__A : Union[str, Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__A : int = st.sidebar.checkbox('''Demo options''')
if demo_options:
__A : Any = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__A : Optional[int] = action_list.index(action_st)
__A : Optional[Any] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__A : Tuple = show_type == '''Show full text of passages'''
else:
__A : Dict = 3
__A : Dict = True
__A : Optional[int] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__A : Tuple = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__A : Optional[int] = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__A : Any = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__A : Any = '''wiki40b'''
__A : Optional[int] = '''dense'''
__A : Union[str, Any] = '''beam'''
__A : Optional[Any] = 2
__A : Union[str, Any] = 64
__A : Dict = 256
__A : Dict = None
__A : Optional[Any] = None
__A : List[str] = st.sidebar.checkbox('''Generation options''')
if generate_options:
__A : Tuple = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__A : str = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__A : List[Any] = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__A : str = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__A : Optional[Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__A : Tuple = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__A : Any = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__A : List[str] = None
# start main text
__A : int = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__A : List[str] = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__A : Tuple = st.text_input('''Enter your question here:''', '''''')
else:
__A : List[str] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__A , __A : Optional[Any] = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__A , __A : Tuple = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__A : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__A : Optional[Any] = support_list[:10]
__A : Union[str, Any] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__A , __A : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__A , __A : str = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__A : Any = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__A : Any = res[1].strip()
if sec_titles == "":
__A : Any = '''[{}]({})'''.format(res[0], wiki_url)
else:
__A : Tuple = sec_titles.split(''' & ''')
__A : Tuple = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__A : Tuple = find_nearest_training(question)
__A : str = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__A : Tuple = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__A : str = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 323
|
from manim import *
class __A ( lowerCAmelCase ):
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
| 323
| 1
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
print('Loading config file...' )
def flatten_yaml_as_dict(_UpperCAmelCase, _UpperCAmelCase="", _UpperCAmelCase="." ):
lowerCAmelCase : Any = []
for k, v in d.items():
lowerCAmelCase : int = parent_key + sep + k if parent_key else k
if isinstance(_UpperCAmelCase, collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(_UpperCAmelCase, _UpperCAmelCase, sep=_UpperCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(_UpperCAmelCase )
lowerCAmelCase : str = argparse.Namespace()
with open(_UpperCAmelCase, 'r' ) as yaml_file:
try:
lowerCAmelCase : Tuple = yaml.load(_UpperCAmelCase, Loader=yaml.FullLoader )
lowerCAmelCase : int = flatten_yaml_as_dict(_UpperCAmelCase )
for k, v in flat_cfg.items():
setattr(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(_UpperCAmelCase, str(_UpperCAmelCase ) ) )
return config
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : Optional[int] = MobileViTVaConfig()
lowerCAmelCase : Union[str, Any] = False
# dataset
if task_name.startswith('imagenet1k_' ):
lowerCAmelCase : Tuple = 1_000
if int(task_name.strip().split('_' )[-1] ) == 384:
lowerCAmelCase : Optional[int] = 384
else:
lowerCAmelCase : Dict = 256
lowerCAmelCase : Union[str, Any] = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
lowerCAmelCase : Optional[Any] = 21_000
if int(task_name.strip().split('_' )[-1] ) == 384:
lowerCAmelCase : Dict = 384
else:
lowerCAmelCase : Optional[Any] = 256
lowerCAmelCase : Dict = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
lowerCAmelCase : Any = 151
lowerCAmelCase : Dict = 512
lowerCAmelCase : List[str] = 'ade20k-id2label.json'
lowerCAmelCase : Any = True
elif task_name.startswith('voc_' ):
lowerCAmelCase : Dict = 21
lowerCAmelCase : str = 512
lowerCAmelCase : List[Any] = 'pascal-voc-id2label.json'
lowerCAmelCase : Dict = True
# orig_config
lowerCAmelCase : List[str] = load_orig_config_file(_UpperCAmelCase )
assert getattr(_UpperCAmelCase, 'model.classification.name', -1 ) == "mobilevit_v2", "Invalid model"
lowerCAmelCase : Dict = getattr(_UpperCAmelCase, 'model.classification.mitv2.width_multiplier', 1.0 )
assert (
getattr(_UpperCAmelCase, 'model.classification.mitv2.attn_norm_layer', -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCAmelCase : List[Any] = getattr(_UpperCAmelCase, 'model.classification.activation.name', 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCAmelCase : Any = getattr(_UpperCAmelCase, 'model.segmentation.output_stride', 16 )
if "_deeplabv3" in task_name:
lowerCAmelCase : str = getattr(_UpperCAmelCase, 'model.segmentation.deeplabv3.aspp_rates', [12, 24, 36] )
lowerCAmelCase : Optional[int] = getattr(_UpperCAmelCase, 'model.segmentation.deeplabv3.aspp_out_channels', 512 )
lowerCAmelCase : List[str] = getattr(_UpperCAmelCase, 'model.segmentation.deeplabv3.aspp_dropout', 0.1 )
# id2label
lowerCAmelCase : Optional[Any] = 'huggingface/label-files'
lowerCAmelCase : str = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase, repo_type='dataset' ), 'r' ) )
lowerCAmelCase : List[str] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase : List[Any] = idalabel
lowerCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = dct.pop(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = val
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
if base_model:
lowerCAmelCase : Union[str, Any] = ''
else:
lowerCAmelCase : Union[str, Any] = 'mobilevitv2.'
lowerCAmelCase : Optional[Any] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCAmelCase : Optional[Any] = k[8:]
else:
lowerCAmelCase : Optional[Any] = k
if ".block." in k:
lowerCAmelCase : Any = k_new.replace('.block.', '.' )
if ".conv." in k:
lowerCAmelCase : str = k_new.replace('.conv.', '.convolution.' )
if ".norm." in k:
lowerCAmelCase : List[str] = k_new.replace('.norm.', '.normalization.' )
if "conv_1." in k:
lowerCAmelCase : List[str] = k_new.replace('conv_1.', f"{model_prefix}conv_stem." )
for i in [1, 2]:
if f"layer_{i}." in k:
lowerCAmelCase : int = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
lowerCAmelCase : Dict = k_new.replace('.exp_1x1.', '.expand_1x1.' )
if ".red_1x1." in k:
lowerCAmelCase : Optional[int] = k_new.replace('.red_1x1.', '.reduce_1x1.' )
for i in [3, 4, 5]:
if f"layer_{i}.0." in k:
lowerCAmelCase : List[str] = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if f"layer_{i}.1.local_rep.0." in k:
lowerCAmelCase : Optional[Any] = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if f"layer_{i}.1.local_rep.1." in k:
lowerCAmelCase : Optional[int] = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
lowerCAmelCase : Optional[Any] = [0, 1]
elif i == 4:
lowerCAmelCase : List[str] = [0, 1, 2, 3]
elif i == 5:
lowerCAmelCase : Tuple = [0, 1, 2]
for j in j_in:
if f"layer_{i}.1.global_rep.{j}." in k:
lowerCAmelCase : str = k_new.replace(
f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if f"layer_{i}.1.global_rep.{j+1}." in k:
lowerCAmelCase : str = k_new.replace(
f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm." )
if f"layer_{i}.1.conv_proj." in k:
lowerCAmelCase : str = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
lowerCAmelCase : Dict = k_new.replace('pre_norm_attn.0.', 'layernorm_before.' )
if "pre_norm_attn.1." in k:
lowerCAmelCase : int = k_new.replace('pre_norm_attn.1.', 'attention.' )
if "pre_norm_ffn.0." in k:
lowerCAmelCase : List[Any] = k_new.replace('pre_norm_ffn.0.', 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
lowerCAmelCase : int = k_new.replace('pre_norm_ffn.1.', 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
lowerCAmelCase : List[str] = k_new.replace('pre_norm_ffn.3.', 'ffn.conv2.' )
if "classifier.1." in k:
lowerCAmelCase : Optional[int] = k_new.replace('classifier.1.', 'classifier.' )
if "seg_head." in k:
lowerCAmelCase : Any = k_new.replace('seg_head.', 'segmentation_head.' )
if ".aspp_layer." in k:
lowerCAmelCase : Union[str, Any] = k_new.replace('.aspp_layer.', '.' )
if ".aspp_pool." in k:
lowerCAmelCase : Tuple = k_new.replace('.aspp_pool.', '.' )
rename_keys.append((k, k_new) )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(_UpperCAmelCase )
for k in keys_to_ignore:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCAmelCase : str = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Tuple = get_mobilevitva_config(_UpperCAmelCase, _UpperCAmelCase )
# load original state_dict
lowerCAmelCase : Optional[int] = torch.load(_UpperCAmelCase, map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
lowerCAmelCase : Optional[Any] = MobileViTVaForSemanticSegmentation(_UpperCAmelCase ).eval()
lowerCAmelCase : Optional[int] = False
else:
lowerCAmelCase : str = MobileViTVaForImageClassification(_UpperCAmelCase ).eval()
lowerCAmelCase : Tuple = False
# remove and rename some keys of load the original model
lowerCAmelCase : Dict = checkpoint
remove_unused_keys(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = create_rename_keys(_UpperCAmelCase, base_model=_UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# load modified state_dict
model.load_state_dict(_UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCAmelCase : Any = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
lowerCAmelCase : Dict = image_processor(images=prepare_img(), return_tensors='pt' )
lowerCAmelCase : Optional[int] = model(**_UpperCAmelCase )
# verify classification model
if task_name.startswith('imagenet' ):
lowerCAmelCase : List[str] = outputs.logits
lowerCAmelCase : Union[str, Any] = logits.argmax(-1 ).item()
print('Predicted class:', model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCAmelCase : List[str] = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3], _UpperCAmelCase, atol=1e-4 )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : str = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__A : List[Any] = logging.get_logger(__name__)
class __A ( lowerCAmelCase ):
def __init__( self : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 323
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A : Dict = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A : List[str] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A : Any = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__A : Tuple = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__A : Any = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__A : List[str] = tf.keras.preprocessing.image.img_to_array(test_image)
__A : Optional[Any] = np.expand_dims(test_image, axis=0)
__A : int = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__A : Optional[int] = '''Normal'''
if result[0][0] == 1:
__A : str = '''Abnormality detected'''
| 323
| 1
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 323
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : str = logging.getLogger(__name__)
class __A ( lowerCAmelCase ):
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None ):
lowerCAmelCase : List[Any] = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] )
lowerCAmelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = BertEncoderWithPabee(UpperCAmelCase_ )
self.init_weights()
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
lowerCAmelCase : int = threshold
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = patience
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(UpperCAmelCase_ )
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
if token_type_ids is None:
lowerCAmelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : Optional[int] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.invert_attention_mask(UpperCAmelCase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : int = self.embeddings(
input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ )
lowerCAmelCase : List[str] = embedding_output
if self.training:
lowerCAmelCase : Tuple = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Dict = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(UpperCAmelCase_ ) )
res.append(UpperCAmelCase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Union[str, Any] = self.encoder(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )]
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = output_layers[i](UpperCAmelCase_ )
if regression:
lowerCAmelCase : List[str] = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Any = 0
else:
lowerCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[Any] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Dict = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : int = BertModelWithPabee(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None , ):
lowerCAmelCase : int = self.bert(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCAmelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
| 323
| 1
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Tuple = (CMStochasticIterativeScheduler,)
lowerCAmelCase_ : List[Any] = 10
def lowercase__ ( self : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Dict = {
'num_train_timesteps': 201,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
config.update(**UpperCAmelCase_ )
return config
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = 10
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0](**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = scheduler.timesteps[0]
lowerCAmelCase : List[str] = scheduler.timesteps[1]
lowerCAmelCase : Union[str, Any] = self.dummy_sample
lowerCAmelCase : List[Any] = 0.1 * sample
lowerCAmelCase : str = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
lowerCAmelCase : Any = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self : Any ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )
def lowercase__ ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCAmelCase_ )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Union[str, Any] = self.get_scheduler_config()
lowerCAmelCase : List[Any] = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Tuple = 1
scheduler.set_timesteps(UpperCAmelCase_ )
lowerCAmelCase : Dict = scheduler.timesteps
lowerCAmelCase : Any = torch.manual_seed(0 )
lowerCAmelCase : Dict = self.dummy_model()
lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCAmelCase_ ):
# 1. scale model input
lowerCAmelCase : str = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# 2. predict noise residual
lowerCAmelCase : List[Any] = model(UpperCAmelCase_ , UpperCAmelCase_ )
# 3. predict previous sample x_t-1
lowerCAmelCase : List[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample
lowerCAmelCase : List[Any] = pred_prev_sample
lowerCAmelCase : Dict = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowerCAmelCase : Any = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = self.scheduler_classes[0]
lowerCAmelCase : int = self.get_scheduler_config()
lowerCAmelCase : Any = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [106, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_ )
lowerCAmelCase : Any = scheduler.timesteps
lowerCAmelCase : int = torch.manual_seed(0 )
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowerCAmelCase : Optional[Any] = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# 2. predict noise residual
lowerCAmelCase : Dict = model(UpperCAmelCase_ , UpperCAmelCase_ )
# 3. predict previous sample x_t-1
lowerCAmelCase : str = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample
lowerCAmelCase : List[str] = pred_prev_sample
lowerCAmelCase : List[Any] = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowerCAmelCase : List[str] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : int = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCAmelCase_ , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : str = self.scheduler_classes[0]
lowerCAmelCase : int = self.get_scheduler_config()
lowerCAmelCase : Union[str, Any] = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Dict = [39, 30, 12, 1, 0]
lowerCAmelCase : str = len(UpperCAmelCase_ )
with self.assertRaises(UpperCAmelCase_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
lowerCAmelCase : List[Any] = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_ )
| 323
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "deberta-v2"
def __init__( self : int , UpperCAmelCase_ : Dict=128100 , UpperCAmelCase_ : Optional[int]=1536 , UpperCAmelCase_ : Tuple=24 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Any=6144 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-7 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : int="gelu" , **UpperCAmelCase_ : int , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Union[str, Any] = relative_attention
lowerCAmelCase : List[Any] = max_relative_positions
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase_ ) == str:
lowerCAmelCase : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCAmelCase : str = pos_att_type
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : str = kwargs.get('pooler_hidden_size' , UpperCAmelCase_ )
lowerCAmelCase : Tuple = pooler_dropout
lowerCAmelCase : Union[str, Any] = pooler_hidden_act
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase__ ( self : int ):
return 12
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : "PreTrainedTokenizerBase" = None , ):
lowerCAmelCase : List[str] = super().generate_dummy_inputs(preprocessor=UpperCAmelCase_ , framework=UpperCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 323
| 1
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
__A : Optional[Any] = get_tests_dir('''fixtures/dummy-config.json''')
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Any ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Dict = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = AutoConfig.for_model('roberta' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase : Optional[int] = os.path.join(UpperCAmelCase_ , 'fake-roberta' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(type(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
try:
AutoConfig.register('custom' , UpperCAmelCase_ )
# Wrong model type will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register('model' , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register('bert' , UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase : Optional[int] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowercase__ ( self : Tuple ):
with self.assertRaisesRegex(
UpperCAmelCase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained('bert-base' )
def lowercase__ ( self : Tuple ):
with self.assertRaisesRegex(
UpperCAmelCase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCAmelCase_ , revision='aaaaaa' )
def lowercase__ ( self : List[Any] ):
with self.assertRaisesRegex(
UpperCAmelCase_ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
lowerCAmelCase : str = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def lowercase__ ( self : str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
lowerCAmelCase : int = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=UpperCAmelCase_ )
lowerCAmelCase : Dict = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def lowercase__ ( self : Optional[int] ):
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Any = "new-model"
try:
AutoConfig.register('new-model' , UpperCAmelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 323
|
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 323
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__A : int = 25_0004
__A : Tuple = 25_0020
@require_sentencepiece
@require_tokenizers
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = MBartTokenizer
lowerCAmelCase_ : List[Any] = MBartTokenizerFast
lowerCAmelCase_ : int = True
lowerCAmelCase_ : Tuple = True
def lowercase__ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Any = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Dict = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
lowerCAmelCase : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def lowercase__ ( self : Tuple ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase : str = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : int = tempfile.mkdtemp()
lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase : List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase : List[Any] = tempfile.mkdtemp()
lowerCAmelCase : int = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
lowerCAmelCase : Dict = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase : Optional[int] = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
lowerCAmelCase_ : str = "facebook/mbart-large-en-ro"
lowerCAmelCase_ : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
lowerCAmelCase_ : Any = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
lowerCAmelCase_ : List[Any] = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE]
@classmethod
def lowercase__ ( cls : int ):
lowerCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCAmelCase : int = 1
return cls
def lowercase__ ( self : str ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def lowercase__ ( self : str ):
lowerCAmelCase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
def lowercase__ ( self : int ):
self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids )
lowerCAmelCase : Optional[int] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCAmelCase : List[str] = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , UpperCAmelCase_ )
lowerCAmelCase : Any = 10
lowerCAmelCase : str = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = tempfile.mkdtemp()
lowerCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = MBartTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ )
@require_torch
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors='pt' )
lowerCAmelCase : str = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt' )
lowerCAmelCase : Any = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors='pt' )
lowerCAmelCase : List[Any] = targets['input_ids']
lowerCAmelCase : List[str] = shift_tokens_right(UpperCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : str ):
lowerCAmelCase : Tuple = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
# flake8: noqa
# Lint as: python3
__A : List[str] = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 323
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = 13
lowerCAmelCase : Union[str, Any] = 7
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : List[str] = 2
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[Any] = 37
lowerCAmelCase : List[Any] = 'gelu'
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Optional[Any] = 512
lowerCAmelCase : Dict = 16
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Union[str, Any] = 0.02
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Any = None
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[str] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = self.num_labels
lowerCAmelCase : Any = TFRoFormerForTokenClassification(config=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : int = False
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
lowerCAmelCase : List[Any] = TFRoFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0]
# TODO Replace vocab size
lowerCAmelCase : Any = 50000
lowerCAmelCase : str = [1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = tf.constant([[4, 10]] )
lowerCAmelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowerCAmelCase : int = emba(input_ids.shape )
lowerCAmelCase : str = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
lowerCAmelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowerCAmelCase : List[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
# 2,12,16,64
lowerCAmelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : List[str] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowerCAmelCase : List[Any] = embed_positions([2, 16, 768] )[None, None, :, :]
lowerCAmelCase , lowerCAmelCase : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
| 323
| 1
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A : List[Any] = '''pt'''
elif is_tf_available():
__A : List[str] = '''tf'''
else:
__A : Optional[Any] = '''jax'''
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : str = PerceiverTokenizer
lowerCAmelCase_ : int = False
def lowercase__ ( self : List[str] ):
super().setUp()
lowerCAmelCase : List[str] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : Union[str, Any] ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase__ ( self : Optional[Any] , **UpperCAmelCase_ : List[str] ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : str=20 , UpperCAmelCase_ : List[str]=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase : Dict = []
for i in range(len(UpperCAmelCase_ ) ):
try:
lowerCAmelCase : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase : Optional[Any] = list(filter(lambda UpperCAmelCase_ : re.match(R'^[ a-zA-Z]+$' , t[1] ) , UpperCAmelCase_ ) )
lowerCAmelCase : Union[str, Any] = list(filter(lambda UpperCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase_ ) , UpperCAmelCase_ ) )
if max_length is not None and len(UpperCAmelCase_ ) > max_length:
lowerCAmelCase : Optional[int] = toks[:max_length]
if min_length is not None and len(UpperCAmelCase_ ) < min_length and len(UpperCAmelCase_ ) > 0:
while len(UpperCAmelCase_ ) < min_length:
lowerCAmelCase : List[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase : Optional[Any] = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
if " " not in output_txt and len(UpperCAmelCase_ ) > 1:
lowerCAmelCase : List[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase_ )
)
if with_prefix_space:
lowerCAmelCase : List[str] = ' ' + output_txt
lowerCAmelCase : Tuple = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
return output_txt, output_ids
def lowercase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = self.perceiver_tokenizer
lowerCAmelCase : Optional[int] = 'Unicode €.'
lowerCAmelCase : str = tokenizer(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , UpperCAmelCase_ )
# decoding
lowerCAmelCase : List[Any] = tokenizer.decode(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , '[CLS]Unicode €.[SEP]' )
lowerCAmelCase : Union[str, Any] = tokenizer('e è é ê ë' )
lowerCAmelCase : int = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , UpperCAmelCase_ )
# decoding
lowerCAmelCase : List[Any] = tokenizer.decode(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.perceiver_tokenizer
lowerCAmelCase : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCAmelCase : Optional[int] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowerCAmelCase : Optional[Any] = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
if FRAMEWORK != "jax":
lowerCAmelCase : Dict = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.perceiver_tokenizer
lowerCAmelCase : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCAmelCase_ )
self.assertIn('attention_mask' , UpperCAmelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = self.perceiver_tokenizer
lowerCAmelCase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
lowerCAmelCase : List[str] = tokenizer(
text_target=UpperCAmelCase_ , max_length=32 , padding='max_length' , truncation=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase__ ( self : Union[str, Any] ):
# safety check on max_len default value so we are sure the test works
lowerCAmelCase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase : List[str] = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : str = tokenizer.__class__.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : int = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
shutil.rmtree(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase : List[str] = tempfile.mkdtemp()
lowerCAmelCase : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCAmelCase : List[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCAmelCase : Tuple = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.__class__.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase : Tuple = tokenizer.__class__.from_pretrained(UpperCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase : List[Any] = json.load(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase : List[Any] = json.load(UpperCAmelCase_ )
lowerCAmelCase : str = [f"<extra_id_{i}>" for i in range(125 )]
lowerCAmelCase : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCAmelCase : Union[str, Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCAmelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase : str = tokenizer_class.from_pretrained(
UpperCAmelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase : Dict = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCAmelCase_ )]
lowerCAmelCase : List[str] = tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase__ ( self : Dict ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : Union[str, Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : List[Any] ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowerCAmelCase : Optional[int] = self.get_tokenizers(fast=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase : int = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_string(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
| 323
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 323
| 1
|
from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = 0, _UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
lowerCAmelCase : str = len(_UpperCAmelCase )
while lo < hi:
lowerCAmelCase : int = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowerCAmelCase : int = mid + 1
else:
lowerCAmelCase : int = mid
return lo
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = 0, _UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
lowerCAmelCase : str = len(_UpperCAmelCase )
while lo < hi:
lowerCAmelCase : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowerCAmelCase : Optional[Any] = mid + 1
else:
lowerCAmelCase : Optional[Any] = mid
return lo
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = 0, _UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = 0, _UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int | None:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : List[Any] = len(_UpperCAmelCase ) - 1
while left <= right:
lowerCAmelCase : Tuple = left + (right - left) // 2
lowerCAmelCase : str = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowerCAmelCase : Tuple = midpoint - 1
else:
lowerCAmelCase : Optional[Any] = midpoint + 1
return None
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int | None:
'''simple docstring'''
lowerCAmelCase : Dict = bisect.bisect_left(_UpperCAmelCase, _UpperCAmelCase )
if index != len(_UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
lowerCAmelCase : Tuple = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCAmelCase, _UpperCAmelCase, midpoint + 1, _UpperCAmelCase )
if __name__ == "__main__":
__A : Union[str, Any] = input('''Enter numbers separated by comma:\n''').strip()
__A : Dict = sorted(int(item) for item in user_input.split(''','''))
__A : List[str] = int(input('''Enter a single number to be found in the list:\n'''))
__A : List[str] = binary_search(collection, target)
if result is None:
print(F'{target} was not found in {collection}.')
else:
print(F'{target} was found at position {result} in {collection}.')
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 323
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase_ : Optional[List[bool]]
lowerCAmelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 1
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2], unknown_args[1::2] )}
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = ArgumentParser(
'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_UpperCAmelCase )
EnvironmentCommand.register_subcommand(_UpperCAmelCase )
TestCommand.register_subcommand(_UpperCAmelCase )
RunBeamCommand.register_subcommand(_UpperCAmelCase )
DummyDataCommand.register_subcommand(_UpperCAmelCase )
# Parse args
lowerCAmelCase , lowerCAmelCase : Optional[int] = parser.parse_known_args()
if not hasattr(_UpperCAmelCase, 'func' ):
parser.print_help()
exit(1 )
lowerCAmelCase : str = parse_unknown_args(_UpperCAmelCase )
# Run
lowerCAmelCase : List[str] = args.func(_UpperCAmelCase, **_UpperCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 323
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return x + 2
class __A ( unittest.TestCase ):
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = 'x = 3'
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
lowerCAmelCase : Dict = 'x = y'
lowerCAmelCase : List[Any] = {'y': 5}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 'y = add_two(x)'
lowerCAmelCase : int = {'x': 3}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 'x = 3'
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5'
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\''
lowerCAmelCase : str = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} )
lowerCAmelCase : Any = {'x': 8}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = 'test_list = [x, add_two(x)]'
lowerCAmelCase : Optional[Any] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [3, 5] )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : int = 'y = x'
lowerCAmelCase : Optional[int] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowerCAmelCase : List[str] = {'x': 3}
lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowerCAmelCase : List[Any] = {'x': 3}
lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i'
lowerCAmelCase : str = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ )
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
| 323
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> bool:
'''simple docstring'''
lowerCAmelCase : int = int(number**0.5 )
return number == sq * sq
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> tuple[int, int]:
'''simple docstring'''
lowerCAmelCase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowerCAmelCase : int = x_den * y_den * z_den
lowerCAmelCase : int = gcd(_UpperCAmelCase, _UpperCAmelCase )
top //= hcf
bottom //= hcf
return top, bottom
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 35 ) -> int:
'''simple docstring'''
lowerCAmelCase : set = set()
lowerCAmelCase : int
lowerCAmelCase : Fraction = Fraction(0 )
lowerCAmelCase : tuple[int, int]
for x_num in range(1, order + 1 ):
for x_den in range(x_num + 1, order + 1 ):
for y_num in range(1, order + 1 ):
for y_den in range(y_num + 1, order + 1 ):
# n=1
lowerCAmelCase : Dict = x_num * y_den + x_den * y_num
lowerCAmelCase : List[Any] = x_den * y_den
lowerCAmelCase : Tuple = gcd(_UpperCAmelCase, _UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase : Tuple = add_three(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
unique_s.add(_UpperCAmelCase )
# n=2
lowerCAmelCase : Any = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowerCAmelCase : int = x_den * x_den * y_den * y_den
if is_sq(_UpperCAmelCase ) and is_sq(_UpperCAmelCase ):
lowerCAmelCase : Optional[Any] = int(sqrt(_UpperCAmelCase ) )
lowerCAmelCase : List[Any] = int(sqrt(_UpperCAmelCase ) )
lowerCAmelCase : List[Any] = gcd(_UpperCAmelCase, _UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase : str = add_three(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
unique_s.add(_UpperCAmelCase )
# n=-1
lowerCAmelCase : int = x_num * y_num
lowerCAmelCase : Tuple = x_den * y_num + x_num * y_den
lowerCAmelCase : Any = gcd(_UpperCAmelCase, _UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase : List[str] = add_three(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
unique_s.add(_UpperCAmelCase )
# n=2
lowerCAmelCase : str = x_num * x_num * y_num * y_num
lowerCAmelCase : Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_UpperCAmelCase ) and is_sq(_UpperCAmelCase ):
lowerCAmelCase : Dict = int(sqrt(_UpperCAmelCase ) )
lowerCAmelCase : Union[str, Any] = int(sqrt(_UpperCAmelCase ) )
lowerCAmelCase : Any = gcd(_UpperCAmelCase, _UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase : Dict = add_three(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
unique_s.add(_UpperCAmelCase )
for num, den in unique_s:
total += Fraction(_UpperCAmelCase, _UpperCAmelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 323
|
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCAmelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_UpperCAmelCase, 2 ) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCAmelCase : Optional[Any] = (sidea + sidea + sidea) / 2
lowerCAmelCase : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
print(F'Reqular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 1
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A : Dict = 16
__A : Optional[int] = 32
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = 16 ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase : Union[str, Any] = DatasetDict(
{
'train': dataset['train'].select(_UpperCAmelCase ),
'validation': dataset['train'].select(_UpperCAmelCase ),
'test': dataset['validation'],
} )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase : Union[str, Any] = tokenizer(examples['sentence1'], examples['sentence2'], truncation=_UpperCAmelCase, max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase : Optional[Any] = datasets.map(
_UpperCAmelCase, batched=_UpperCAmelCase, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase : Any = tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase : Union[str, Any] = 8
else:
lowerCAmelCase : int = None
return tokenizer.pad(
_UpperCAmelCase, padding='longest', max_length=_UpperCAmelCase, pad_to_multiple_of=_UpperCAmelCase, return_tensors='pt', )
# Instantiate dataloaders.
lowerCAmelCase : Optional[Any] = DataLoader(
tokenized_datasets['train'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase )
lowerCAmelCase : List[Any] = DataLoader(
tokenized_datasets['validation'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets['test'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader, test_dataloader
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : int = []
# Download the dataset
lowerCAmelCase : Optional[Any] = load_dataset('glue', 'mrpc' )
# Create our splits
lowerCAmelCase : List[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowerCAmelCase : List[Any] = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase : Any = config['lr']
lowerCAmelCase : str = int(config['num_epochs'] )
lowerCAmelCase : Tuple = int(config['seed'] )
lowerCAmelCase : Optional[Any] = int(config['batch_size'] )
lowerCAmelCase : Optional[Any] = evaluate.load('glue', 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase : List[str] = MAX_GPU_BATCH_SIZE
set_seed(_UpperCAmelCase )
# New Code #
# Create our folds:
lowerCAmelCase : Optional[int] = kfold.split(np.zeros(datasets['train'].num_rows ), datasets['train']['label'] )
lowerCAmelCase : int = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_UpperCAmelCase ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = get_fold_dataloaders(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase : str = AdamW(params=model.parameters(), lr=_UpperCAmelCase )
# Instantiate scheduler
lowerCAmelCase : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase, num_warmup_steps=100, num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = accelerator.prepare(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : Any = outputs.loss
lowerCAmelCase : Tuple = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**_UpperCAmelCase )
lowerCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase , lowerCAmelCase : Tuple = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_UpperCAmelCase, references=_UpperCAmelCase, )
lowerCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", _UpperCAmelCase )
# New Code #
# We also run predictions on the test set at the very end
lowerCAmelCase : Optional[int] = []
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase : List[str] = model(**_UpperCAmelCase )
lowerCAmelCase : List[Any] = outputs.logits
lowerCAmelCase , lowerCAmelCase : str = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_UpperCAmelCase, dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowerCAmelCase : List[str] = torch.cat(_UpperCAmelCase, dim=0 )
lowerCAmelCase : List[Any] = torch.stack(_UpperCAmelCase, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
lowerCAmelCase : List[str] = metric.compute(predictions=_UpperCAmelCase, references=_UpperCAmelCase )
accelerator.print('Average test metrics from all folds:', _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision', type=_UpperCAmelCase, default=_UpperCAmelCase, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds', type=_UpperCAmelCase, default=3, help='The number of splits to perform across the dataset' )
lowerCAmelCase : List[Any] = parser.parse_args()
lowerCAmelCase : Any = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase, _UpperCAmelCase )
if __name__ == "__main__":
main()
| 323
|
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 1
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A : Dict = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
require_version(deps[pkg], _UpperCAmelCase )
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
from copy import deepcopy
class __A :
def __init__( self : Tuple , UpperCAmelCase_ : list[int] | None = None , UpperCAmelCase_ : int | None = None ):
if arr is None and size is not None:
lowerCAmelCase : Union[str, Any] = size
lowerCAmelCase : str = [0] * size
elif arr is not None:
self.init(UpperCAmelCase_ )
else:
raise ValueError('Either arr or size must be specified' )
def lowercase__ ( self : Any , UpperCAmelCase_ : list[int] ):
lowerCAmelCase : Tuple = len(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = deepcopy(UpperCAmelCase_ )
for i in range(1 , self.size ):
lowerCAmelCase : Any = self.next_(UpperCAmelCase_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
lowerCAmelCase : List[str] = self.next_(UpperCAmelCase_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowercase__ ( UpperCAmelCase_ : int ):
return index + (index & (-index))
@staticmethod
def lowercase__ ( UpperCAmelCase_ : int ):
return index - (index & (-index))
def lowercase__ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowerCAmelCase : Tuple = self.next_(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.add(UpperCAmelCase_ , value - self.get(UpperCAmelCase_ ) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : int ):
if right == 0:
return 0
lowerCAmelCase : Optional[int] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowerCAmelCase : Optional[int] = self.prev(UpperCAmelCase_ )
return result
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
return self.prefix(UpperCAmelCase_ ) - self.prefix(UpperCAmelCase_ )
def lowercase__ ( self : str , UpperCAmelCase_ : int ):
return self.query(UpperCAmelCase_ , index + 1 )
def lowercase__ ( self : str , UpperCAmelCase_ : int ):
value -= self.tree[0]
if value < 0:
return -1
lowerCAmelCase : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowerCAmelCase : Optional[int] = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
|
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 100 ) -> int:
'''simple docstring'''
lowerCAmelCase : Any = sum(i * i for i in range(1, n + 1 ) )
lowerCAmelCase : str = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
| 323
| 1
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
|
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 0.0
for coeff in reversed(_UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = result * x + coeff
return result
if __name__ == "__main__":
__A : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
__A : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 323
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : str ):
lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase : str = BlipImageProcessor()
lowerCAmelCase : int = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
lowerCAmelCase : int = InstructBlipProcessor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).tokenizer
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : List[str] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).image_processor
def lowercase__ ( self : List[str] , **UpperCAmelCase_ : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).qformer_tokenizer
def lowercase__ ( self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : int ):
lowerCAmelCase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : Dict = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : int = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : List[Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : int = self.get_tokenizer()
lowerCAmelCase : Tuple = self.get_qformer_tokenizer()
lowerCAmelCase : Optional[int] = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : int = self.prepare_image_inputs()
lowerCAmelCase : Optional[int] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : Dict = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : Any = self.get_qformer_tokenizer()
lowerCAmelCase : int = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : List[str] = 'lower newer'
lowerCAmelCase : int = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Dict = tokenizer(UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = qformer_tokenizer(UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def lowercase__ ( self : int ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : List[Any] = self.get_qformer_tokenizer()
lowerCAmelCase : Union[str, Any] = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : Any ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : Optional[Any] = self.get_tokenizer()
lowerCAmelCase : List[Any] = self.get_qformer_tokenizer()
lowerCAmelCase : List[Any] = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : str = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : List[Any] = self.get_tokenizer()
lowerCAmelCase : Any = self.get_qformer_tokenizer()
lowerCAmelCase : Tuple = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = 'lower newer'
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : Optional[int] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 323
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=18 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , ):
lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase : str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : Any = max_resolution
lowerCAmelCase : int = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : str = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
def lowercase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323
| 1
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : str = (PNDMScheduler,)
lowerCAmelCase_ : str = (("num_inference_steps", 50),)
def lowercase__ ( self : int , **UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : List[str] = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**UpperCAmelCase_ )
return config
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : List[str]=0 , **UpperCAmelCase_ : Dict ):
lowerCAmelCase : Union[str, Any] = dict(self.forward_default_kwargs )
lowerCAmelCase : Optional[int] = kwargs.pop('num_inference_steps' , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = self.dummy_sample
lowerCAmelCase : int = 0.1 * sample
lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Any = self.get_scheduler_config(**UpperCAmelCase_ )
lowerCAmelCase : List[Any] = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals
lowerCAmelCase : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_ )
lowerCAmelCase : List[str] = scheduler_class.from_pretrained(UpperCAmelCase_ )
new_scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals
lowerCAmelCase : List[str] = dummy_past_residuals[:]
lowerCAmelCase : List[str] = scheduler.step_prk(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
lowerCAmelCase : Dict = new_scheduler.step_prk(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase : List[str] = scheduler.step_plms(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
lowerCAmelCase : str = new_scheduler.step_plms(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self : Optional[int] ):
pass
def lowercase__ ( self : int , UpperCAmelCase_ : Any=0 , **UpperCAmelCase_ : List[str] ):
lowerCAmelCase : Dict = dict(self.forward_default_kwargs )
lowerCAmelCase : Dict = kwargs.pop('num_inference_steps' , UpperCAmelCase_ )
lowerCAmelCase : str = self.dummy_sample
lowerCAmelCase : Union[str, Any] = 0.1 * sample
lowerCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Optional[int] = self.get_scheduler_config()
lowerCAmelCase : int = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase : Union[str, Any] = dummy_past_residuals[:]
lowerCAmelCase : Dict = scheduler.step_prk(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
lowerCAmelCase : Tuple = new_scheduler.step_prk(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase : Optional[int] = scheduler.step_plms(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
lowerCAmelCase : List[str] = new_scheduler.step_plms(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self : Optional[Any] , **UpperCAmelCase_ : Tuple ):
lowerCAmelCase : int = self.scheduler_classes[0]
lowerCAmelCase : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase_ )
lowerCAmelCase : Tuple = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 10
lowerCAmelCase : Optional[Any] = self.dummy_model()
lowerCAmelCase : Any = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
lowerCAmelCase : str = model(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = scheduler.step_prk(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowerCAmelCase : str = model(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Tuple = scheduler.step_plms(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
return sample
def lowercase__ ( self : Dict ):
lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
lowerCAmelCase : Any = kwargs.pop('num_inference_steps' , UpperCAmelCase_ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : List[str] = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Any = self.dummy_sample
lowerCAmelCase : int = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase_ , 'set_timesteps' ):
scheduler.set_timesteps(UpperCAmelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase_ , 'set_timesteps' ):
lowerCAmelCase : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase : Optional[int] = dummy_past_residuals[:]
lowerCAmelCase : str = scheduler.step_prk(UpperCAmelCase_ , 0 , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
lowerCAmelCase : Optional[Any] = scheduler.step_prk(UpperCAmelCase_ , 1 , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase : List[str] = scheduler.step_plms(UpperCAmelCase_ , 0 , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
lowerCAmelCase : Any = scheduler.step_plms(UpperCAmelCase_ , 1 , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self : Dict ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )
def lowercase__ ( self : str ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase_ )
lowerCAmelCase : int = self.scheduler_classes[0]
lowerCAmelCase : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase : Tuple = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def lowercase__ ( self : Optional[int] ):
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_ )
def lowercase__ ( self : Any ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_ )
def lowercase__ ( self : Dict ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase_ )
def lowercase__ ( self : Dict ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
lowerCAmelCase : Dict = 27
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : List[Any] = self.dummy_sample
lowerCAmelCase : List[Any] = 0.1 * sample
lowerCAmelCase : Optional[int] = self.get_scheduler_config()
lowerCAmelCase : List[str] = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowerCAmelCase : List[str] = scheduler.step_prk(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
def lowercase__ ( self : Optional[Any] ):
with self.assertRaises(UpperCAmelCase_ ):
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Dict = scheduler_class(**UpperCAmelCase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.full_loop()
lowerCAmelCase : Tuple = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowerCAmelCase : str = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def lowercase__ ( self : Tuple ):
lowerCAmelCase : List[str] = self.full_loop(prediction_type='v_prediction' )
lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[int] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def lowercase__ ( self : Dict ):
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase : Any = self.full_loop(set_alpha_to_one=UpperCAmelCase_ , beta_start=0.01 )
lowerCAmelCase : Tuple = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def lowercase__ ( self : Optional[int] ):
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase : Any = self.full_loop(set_alpha_to_one=UpperCAmelCase_ , beta_start=0.01 )
lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[int] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
| 323
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[Any] = ''
else:
lowerCAmelCase : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : List[str] = dct.pop(_UpperCAmelCase )
lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : str = ViTMSNConfig()
lowerCAmelCase : str = 1_000
lowerCAmelCase : List[str] = 'datasets/huggingface/label-files'
lowerCAmelCase : int = 'imagenet-1k-id2label.json'
lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase ), 'r' ) )
lowerCAmelCase : Optional[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[Any] = 384
lowerCAmelCase : List[Any] = 1_536
lowerCAmelCase : Union[str, Any] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase : List[Any] = 1_024
lowerCAmelCase : Any = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : Any = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase : Any = 4
elif "l7" in checkpoint_url:
lowerCAmelCase : int = 7
lowerCAmelCase : str = 1_024
lowerCAmelCase : Tuple = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Tuple = 16
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : List[str] = ViTMSNModel(_UpperCAmelCase )
lowerCAmelCase : int = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu' )['target_encoder']
lowerCAmelCase : int = ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCAmelCase )
lowerCAmelCase : Tuple = create_rename_keys(_UpperCAmelCase, base_model=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase, base_model=_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase : Dict = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
lowerCAmelCase : Any = ViTImageProcessor(
size=config.image_size, image_mean=_UpperCAmelCase, image_std=_UpperCAmelCase )
lowerCAmelCase : List[Any] = image_processor(images=_UpperCAmelCase, return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A : Dict = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A : List[str] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A : Any = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__A : Tuple = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__A : Any = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__A : List[str] = tf.keras.preprocessing.image.img_to_array(test_image)
__A : Optional[Any] = np.expand_dims(test_image, axis=0)
__A : int = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__A : Optional[int] = '''Normal'''
if result[0][0] == 1:
__A : str = '''Abnormality detected'''
| 323
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError('String lengths must match!' )
lowerCAmelCase : Tuple = 0
for chara, chara in zip(_UpperCAmelCase, _UpperCAmelCase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 1
|
# Imports
import numpy as np
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None ):
self.set_matricies(red=UpperCAmelCase_ , green=UpperCAmelCase_ , blue=UpperCAmelCase_ , red_edge=UpperCAmelCase_ , nir=UpperCAmelCase_ )
def lowercase__ ( self : str , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Tuple=None ):
if red is not None:
lowerCAmelCase : str = red
if green is not None:
lowerCAmelCase : Union[str, Any] = green
if blue is not None:
lowerCAmelCase : Tuple = blue
if red_edge is not None:
lowerCAmelCase : Union[str, Any] = red_edge
if nir is not None:
lowerCAmelCase : Tuple = nir
return True
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple="" , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=None ):
self.set_matricies(red=UpperCAmelCase_ , green=UpperCAmelCase_ , blue=UpperCAmelCase_ , red_edge=UpperCAmelCase_ , nir=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def lowercase__ ( self : str ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def lowercase__ ( self : str ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowercase__ ( self : str ):
return self.nir * (self.red / (self.green**2))
def lowercase__ ( self : Union[str, Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowercase__ ( self : Optional[int] ):
return (self.nir - self.red) / (self.nir + self.red)
def lowercase__ ( self : int ):
return (self.nir - self.blue) / (self.nir + self.blue)
def lowercase__ ( self : int ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowercase__ ( self : Optional[Any] ):
return (self.nir - self.green) / (self.nir + self.green)
def lowercase__ ( self : Dict ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowercase__ ( self : List[Any] ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowercase__ ( self : Union[str, Any] ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowercase__ ( self : int ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[Any]=0.08 , UpperCAmelCase_ : str=1.22 , UpperCAmelCase_ : List[Any]=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowercase__ ( self : Union[str, Any] ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowercase__ ( self : Union[str, Any] ):
return (self.nir / self.green) - 1
def lowercase__ ( self : Dict ):
return (self.nir / self.redEdge) - 1
def lowercase__ ( self : Any ):
return (self.red - self.blue) / self.red
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def lowercase__ ( self : int ):
return self.nir - self.green
def lowercase__ ( self : int ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowercase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : List[Any]=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowercase__ ( self : Tuple ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowercase__ ( self : str , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None ):
return (self.nir - b) / (a * self.red)
def lowercase__ ( self : Dict ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowercase__ ( self : int ):
return (self.red + self.green + self.blue) / 30.5
def lowercase__ ( self : int ):
return self.nir / self.red
def lowercase__ ( self : Optional[int] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def lowercase__ ( self : List[Any] ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowercase__ ( self : List[Any] ):
return self.green / (self.nir + self.red + self.green)
def lowercase__ ( self : int ):
return self.nir / (self.nir + self.red + self.green)
def lowercase__ ( self : Any ):
return self.red / (self.nir + self.red + self.green)
def lowercase__ ( self : int ):
return (self.green - self.red) / (self.green + self.red)
def lowercase__ ( self : int ):
return (self.red - self.green) / (self.red + self.green)
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase : Optional[int] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def lowercase__ ( self : Optional[Any] ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowercase__ ( self : int ):
return self.nir / self.red
def lowercase__ ( self : Tuple ):
return (self.ndvi() + 0.5) ** (1 / 2)
def lowercase__ ( self : Any ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 323
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A : List[Any] = trt.Logger(trt.Logger.WARNING)
__A : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A : List[Any] = logging.getLogger(__name__)
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__A : List[str] = parser.parse_args()
if args.tokenizer_name:
__A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__A : List[Any] = args.per_device_eval_batch_size
__A : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A : Any = True
__A : Union[str, Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__A : List[str] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__A : Dict = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A : str = [network.get_input(i) for i in range(network.num_inputs)]
__A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__A : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.intaa )
lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.intaa )
lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase )
# start time
lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase : List[str] = time.time()
lowerCAmelCase : Tuple = end_time - start_time
lowerCAmelCase : Union[str, Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__A : int = raw_datasets['''validation'''].column_names
__A : int = '''question''' if '''question''' in column_names else column_names[0]
__A : List[str] = '''context''' if '''context''' in column_names else column_names[1]
__A : int = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A : str = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
__A : int = raw_datasets['''validation''']
# Validation Feature Creation
__A : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__A : List[str] = default_data_collator
__A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__A : Union[str, Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
__A , __A : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__A , __A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 323
| 1
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
def __init__( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Any=[10, 20, 30, 40] , UpperCAmelCase_ : Union[str, Any]=[2, 2, 3, 2] , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Optional[Any]=[2, 3, 4] , UpperCAmelCase_ : int=None , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : int = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : List[Any] = num_stages
lowerCAmelCase : Optional[int] = hidden_sizes
lowerCAmelCase : Dict = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : str = initializer_range
lowerCAmelCase : int = out_features
lowerCAmelCase : Optional[Any] = out_indices
lowerCAmelCase : List[Any] = scope
def lowercase__ ( self : int ):
lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : List[str] = None
if self.use_labels:
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : str ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str ):
lowerCAmelCase : Optional[int] = ConvNextVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int ):
lowerCAmelCase : Dict = ConvNextVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : List[str] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Dict = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Any = model(UpperCAmelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = ConvNextVaBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(UpperCAmelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = config_and_inputs
lowerCAmelCase : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = config_and_inputs
lowerCAmelCase : int = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Tuple = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : str = False
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Dict = ConvNextVaModelTester(self )
lowerCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : List[Any] ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def lowercase__ ( self : Any ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def lowercase__ ( self : Any ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def lowercase__ ( self : Union[str, Any] ):
pass
def lowercase__ ( self : int ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase : List[str] = True
if model_class.__name__ in [
*get_values(UpperCAmelCase_ ),
*get_values(UpperCAmelCase_ ),
]:
continue
lowerCAmelCase : List[Any] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
lowerCAmelCase : int = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowercase__ ( self : Dict ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase : Any = False
lowerCAmelCase : Any = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase_ ), *get_values(UpperCAmelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase : List[Any] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase : Tuple = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
lowerCAmelCase : List[str] = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowercase__ ( self : int ):
lowerCAmelCase , lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = model_class(UpperCAmelCase_ )
lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Tuple = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
def check_hidden_states_output(UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : str = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Any = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[Any] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Dict = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Any ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : List[str] = ConvNextVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : str ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.default_image_processor
lowerCAmelCase : List[Any] = prepare_img()
lowerCAmelCase : int = preprocessor(images=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**UpperCAmelCase_ )
# verify the logits
lowerCAmelCase : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
lowerCAmelCase : Tuple = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 323
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = "dinat"
lowerCAmelCase_ : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : Dict=[2, 4, 8, 16] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase_ : int=3.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : str = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = len(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = num_heads
lowerCAmelCase : Tuple = kernel_size
lowerCAmelCase : List[str] = dilations
lowerCAmelCase : Any = mlp_ratio
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase : int = layer_scale_init_value
lowerCAmelCase : Optional[Any] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 323
| 1
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__A : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class __A ( lowerCAmelCase ):
def __init__( self : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int ):
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=None ):
lowerCAmelCase : int = {}
lowerCAmelCase : str = {}
if prompt is not None:
lowerCAmelCase : Tuple = prompt
if generate_kwargs is not None:
lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
lowerCAmelCase : Tuple = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[int] , UpperCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCAmelCase_ : Dict ):
return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_ )
def lowercase__ ( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple=None ):
lowerCAmelCase : Dict = load_image(UpperCAmelCase_ )
if prompt is not None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(
f"Received an invalid text input, got - {type(UpperCAmelCase_ )} - but expected a single string. "
'Note also that one single text can be provided for conditional image to text generation.' )
lowerCAmelCase : Dict = self.model.config.model_type
if model_type == "git":
lowerCAmelCase : Union[str, Any] = self.image_processor(images=UpperCAmelCase_ , return_tensors=self.framework )
lowerCAmelCase : str = self.tokenizer(text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids
lowerCAmelCase : int = [self.tokenizer.cls_token_id] + input_ids
lowerCAmelCase : Optional[int] = torch.tensor(UpperCAmelCase_ ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
lowerCAmelCase : Tuple = self.image_processor(images=UpperCAmelCase_ , header_text=UpperCAmelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCAmelCase : Dict = self.image_processor(images=UpperCAmelCase_ , return_tensors=self.framework )
lowerCAmelCase : Optional[Any] = self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCAmelCase_ )
else:
raise ValueError(f"Model type {model_type} does not support conditional text generation" )
else:
lowerCAmelCase : Optional[Any] = self.image_processor(images=UpperCAmelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCAmelCase : str = None
return model_inputs
def lowercase__ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , UpperCAmelCase_ )
and all(x is None for x in model_inputs['input_ids'] )
):
lowerCAmelCase : str = None
if generate_kwargs is None:
lowerCAmelCase : int = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCAmelCase : Optional[int] = model_inputs.pop(self.model.main_input_name )
lowerCAmelCase : int = self.model.generate(UpperCAmelCase_ , **UpperCAmelCase_ , **UpperCAmelCase_ )
return model_outputs
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = []
for output_ids in model_outputs:
lowerCAmelCase : List[str] = {
'generated_text': self.tokenizer.decode(
UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , )
}
records.append(UpperCAmelCase_ )
return records
| 323
|
from manim import *
class __A ( lowerCAmelCase ):
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
| 323
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
__A : List[str] = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[PIL.Image.Image, np.ndarray]
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : PriorTransformer , UpperCAmelCase_ : CLIPVisionModel , UpperCAmelCase_ : CLIPImageProcessor , UpperCAmelCase_ : HeunDiscreteScheduler , UpperCAmelCase_ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=UpperCAmelCase_ , image_encoder=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , renderer=UpperCAmelCase_ , )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ):
if latents is None:
lowerCAmelCase : Tuple = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
lowerCAmelCase : Dict = latents.to(UpperCAmelCase_ )
lowerCAmelCase : int = latents * scheduler.init_noise_sigma
return latents
def lowercase__ ( self : int , UpperCAmelCase_ : Tuple=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
lowerCAmelCase : Optional[Any] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ )
@property
def lowercase__ ( self : Optional[int] ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCAmelCase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(image[0] , torch.Tensor ):
lowerCAmelCase : Any = torch.cat(UpperCAmelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCAmelCase_ , axis=0 )
if not isinstance(UpperCAmelCase_ , torch.Tensor ):
lowerCAmelCase : Any = self.image_processor(UpperCAmelCase_ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
lowerCAmelCase : List[str] = image.to(dtype=self.image_encoder.dtype , device=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.image_encoder(UpperCAmelCase_ )['last_hidden_state']
lowerCAmelCase : Dict = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCAmelCase : Optional[int] = image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase : Optional[Any] = torch.zeros_like(UpperCAmelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase : Dict = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase_ )
def __call__( self : Tuple , UpperCAmelCase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = 25 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : float = 4.0 , UpperCAmelCase_ : int = 64 , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ):
if isinstance(UpperCAmelCase_ , PIL.Image.Image ):
lowerCAmelCase : Optional[Any] = 1
elif isinstance(UpperCAmelCase_ , torch.Tensor ):
lowerCAmelCase : Any = image.shape[0]
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCAmelCase : Optional[Any] = len(UpperCAmelCase_ )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCAmelCase_ )}" )
lowerCAmelCase : int = self._execution_device
lowerCAmelCase : Dict = batch_size * num_images_per_prompt
lowerCAmelCase : Tuple = guidance_scale > 1.0
lowerCAmelCase : Optional[int] = self._encode_image(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# prior
self.scheduler.set_timesteps(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = self.scheduler.timesteps
lowerCAmelCase : Union[str, Any] = self.prior.config.num_embeddings
lowerCAmelCase : Union[str, Any] = self.prior.config.embedding_dim
lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCAmelCase : Tuple = latents.reshape(latents.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ )
for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase : List[Any] = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.prior(
UpperCAmelCase_ , timestep=UpperCAmelCase_ , proj_embedding=UpperCAmelCase_ , ).predicted_image_embedding
# remove the variance
lowerCAmelCase , lowerCAmelCase : str = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
lowerCAmelCase , lowerCAmelCase : Any = noise_pred.chunk(2 )
lowerCAmelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCAmelCase : Dict = self.scheduler.step(
UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = []
for i, latent in enumerate(UpperCAmelCase_ ):
print()
lowerCAmelCase : List[str] = self.renderer.decode(
latent[None, :] , UpperCAmelCase_ , size=UpperCAmelCase_ , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(UpperCAmelCase_ )
lowerCAmelCase : str = torch.stack(UpperCAmelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
lowerCAmelCase : str = images.cpu().numpy()
if output_type == "pil":
lowerCAmelCase : Dict = [self.numpy_to_pil(UpperCAmelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=UpperCAmelCase_ )
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A : Optional[int] = '''.'''
if __name__ == "__main__":
__A : Dict = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
__A : List[Any] = []
__A : Tuple = []
with open(doctest_file_path) as fp:
for line in fp:
__A : List[str] = line.strip()
__A : Optional[int] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A : Any = '''\n'''.join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 323
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A : Dict = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A : List[str] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A : Any = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__A : Tuple = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__A : Any = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__A : List[str] = tf.keras.preprocessing.image.img_to_array(test_image)
__A : Optional[Any] = np.expand_dims(test_image, axis=0)
__A : int = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__A : Optional[int] = '''Normal'''
if result[0][0] == 1:
__A : str = '''Abnormality detected'''
| 323
| 1
|
import os
from distutils.util import strtobool
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for e in env_keys:
lowerCAmelCase : List[str] = int(os.environ.get(_UpperCAmelCase, -1 ) )
if val >= 0:
return val
return default
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Any:
'''simple docstring'''
lowerCAmelCase : str = os.environ.get(_UpperCAmelCase, str(_UpperCAmelCase ) )
return strtobool(_UpperCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase="no" ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = os.environ.get(_UpperCAmelCase, str(_UpperCAmelCase ) )
return value
| 323
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : str = logging.getLogger(__name__)
class __A ( lowerCAmelCase ):
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None ):
lowerCAmelCase : List[Any] = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] )
lowerCAmelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = BertEncoderWithPabee(UpperCAmelCase_ )
self.init_weights()
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
lowerCAmelCase : int = threshold
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = patience
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(UpperCAmelCase_ )
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
if token_type_ids is None:
lowerCAmelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : Optional[int] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.invert_attention_mask(UpperCAmelCase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : int = self.embeddings(
input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ )
lowerCAmelCase : List[str] = embedding_output
if self.training:
lowerCAmelCase : Tuple = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Dict = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(UpperCAmelCase_ ) )
res.append(UpperCAmelCase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Union[str, Any] = self.encoder(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )]
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = output_layers[i](UpperCAmelCase_ )
if regression:
lowerCAmelCase : List[str] = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Any = 0
else:
lowerCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[Any] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Dict = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : int = BertModelWithPabee(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None , ):
lowerCAmelCase : int = self.bert(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCAmelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
| 323
| 1
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__A : int = logging.get_logger(__name__)
__A : Tuple = Dict[str, Any]
__A : Union[str, Any] = List[Prediction]
@add_end_docstrings(lowerCAmelCase )
class __A ( lowerCAmelCase ):
def __init__( self : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowercase__ ( self : int , **UpperCAmelCase_ : List[str] ):
lowerCAmelCase : Union[str, Any] = {}
if "threshold" in kwargs:
lowerCAmelCase : Union[str, Any] = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ):
return super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowercase__ ( self : int , UpperCAmelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = load_image(UpperCAmelCase_ )
lowerCAmelCase : Dict = torch.IntTensor([[image.height, image.width]] )
lowerCAmelCase : Optional[int] = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
lowerCAmelCase : int = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
lowerCAmelCase : Union[str, Any] = target_size
return inputs
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : List[str] ):
lowerCAmelCase : Tuple = model_inputs.pop('target_size' )
lowerCAmelCase : Dict = self.model(**UpperCAmelCase_ )
lowerCAmelCase : List[str] = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
lowerCAmelCase : Optional[Any] = model_inputs['bbox']
return model_outputs
def lowercase__ ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple=0.9 ):
lowerCAmelCase : Tuple = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
lowerCAmelCase , lowerCAmelCase : Dict = target_size[0].tolist()
def unnormalize(UpperCAmelCase_ : Dict ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
lowerCAmelCase : List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
lowerCAmelCase : Optional[int] = [unnormalize(UpperCAmelCase_ ) for bbox in model_outputs['bbox'].squeeze(0 )]
lowerCAmelCase : List[str] = ['score', 'label', 'box']
lowerCAmelCase : Union[str, Any] = [dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) for vals in zip(scores.tolist() , UpperCAmelCase_ , UpperCAmelCase_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
lowerCAmelCase : str = self.image_processor.post_process_object_detection(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : str = raw_annotations[0]
lowerCAmelCase : Optional[Any] = raw_annotation['scores']
lowerCAmelCase : Tuple = raw_annotation['labels']
lowerCAmelCase : List[Any] = raw_annotation['boxes']
lowerCAmelCase : Optional[int] = scores.tolist()
lowerCAmelCase : Tuple = [self.model.config.idalabel[label.item()] for label in labels]
lowerCAmelCase : Union[str, Any] = [self._get_bounding_box(UpperCAmelCase_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
lowerCAmelCase : Optional[Any] = ['score', 'label', 'box']
lowerCAmelCase : Any = [
dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def lowercase__ ( self : Tuple , UpperCAmelCase_ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = box.int().tolist()
lowerCAmelCase : Optional[Any] = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 323
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "deberta-v2"
def __init__( self : int , UpperCAmelCase_ : Dict=128100 , UpperCAmelCase_ : Optional[int]=1536 , UpperCAmelCase_ : Tuple=24 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Any=6144 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-7 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : int="gelu" , **UpperCAmelCase_ : int , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Union[str, Any] = relative_attention
lowerCAmelCase : List[Any] = max_relative_positions
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase_ ) == str:
lowerCAmelCase : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCAmelCase : str = pos_att_type
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : str = kwargs.get('pooler_hidden_size' , UpperCAmelCase_ )
lowerCAmelCase : Tuple = pooler_dropout
lowerCAmelCase : Union[str, Any] = pooler_hidden_act
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase__ ( self : int ):
return 12
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : "PreTrainedTokenizerBase" = None , ):
lowerCAmelCase : List[str] = super().generate_dummy_inputs(preprocessor=UpperCAmelCase_ , framework=UpperCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 323
| 1
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "laptop" ) -> DataFrame:
'''simple docstring'''
lowerCAmelCase : List[str] = f"https://www.amazon.in/laptop/s?k={product}"
lowerCAmelCase : Any = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
lowerCAmelCase : str = BeautifulSoup(requests.get(_UpperCAmelCase, headers=_UpperCAmelCase ).text )
# Initialize a Pandas dataframe with the column titles
lowerCAmelCase : Optional[int] = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div', attrs={'class': 's-result-item', 'data-component-type': 's-search-result'}, ), soup.find_all('div', attrs={'class': 'a-row a-size-base a-color-base'} ), ):
try:
lowerCAmelCase : str = item.ha.text
lowerCAmelCase : Optional[Any] = 'https://www.amazon.in/' + item.ha.a['href']
lowerCAmelCase : Tuple = item.find('span', attrs={'class': 'a-offscreen'} ).text
try:
lowerCAmelCase : str = item.find('span', attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
lowerCAmelCase : Any = 'Not available'
try:
lowerCAmelCase : str = (
'₹'
+ item.find(
'span', attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
lowerCAmelCase : Tuple = ''
try:
lowerCAmelCase : List[str] = float(
(
(
float(product_mrp.strip('₹' ).replace(',', '' ) )
- float(product_price.strip('₹' ).replace(',', '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',', '' ) )
)
* 100 )
except ValueError:
lowerCAmelCase : Optional[Any] = float('nan' )
except AttributeError:
pass
lowerCAmelCase : Dict = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCAmelCase : List[str] = ' '
lowerCAmelCase : Dict = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__A : Optional[int] = '''headphones'''
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 323
|
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 323
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __A ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[Any] = StableUnCLIPImgaImgPipeline
lowerCAmelCase_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase_ : Any = frozenset([] )
def lowercase__ ( self : int ):
lowerCAmelCase : str = 32
lowerCAmelCase : List[str] = embedder_hidden_size
# image encoding components
lowerCAmelCase : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase_ , projection_dim=UpperCAmelCase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase : Dict = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase_ )
lowerCAmelCase : List[str] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowerCAmelCase : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase_ , layers_per_block=1 , upcast_attention=UpperCAmelCase_ , use_linear_projection=UpperCAmelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase : Any = AutoencoderKL()
lowerCAmelCase : Optional[int] = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Optional[int]=True ):
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCAmelCase : Union[str, Any] = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase : Optional[Any] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if pil_image:
lowerCAmelCase : str = input_image * 0.5 + 0.5
lowerCAmelCase : str = input_image.clamp(0 , 1 )
lowerCAmelCase : Optional[int] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase : Any = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase__ ( self : Any ):
lowerCAmelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Any = self.get_dummy_components()
lowerCAmelCase : Any = StableUnCLIPImgaImgPipeline(**UpperCAmelCase_ )
lowerCAmelCase : int = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCAmelCase_ )
inputs.update({'image_embeds': None} )
lowerCAmelCase : Tuple = sd_pipe(**UpperCAmelCase_ ).images
lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase : Tuple = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase__ ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase_ )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowercase__ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : int ):
lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
lowerCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
lowerCAmelCase : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase : Tuple = pipe(UpperCAmelCase_ , 'anime turle' , generator=UpperCAmelCase_ , output_type='np' )
lowerCAmelCase : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
lowerCAmelCase : Dict = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase : Any = pipe(UpperCAmelCase_ , 'anime turle' , generator=UpperCAmelCase_ , output_type='np' )
lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
lowerCAmelCase : Optional[Any] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : Union[str, Any] = pipe(
UpperCAmelCase_ , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase = 0, _UpperCAmelCase = 0 ) -> list:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = end or len(_UpperCAmelCase )
for i in range(_UpperCAmelCase, _UpperCAmelCase ):
lowerCAmelCase : List[Any] = i
lowerCAmelCase : List[Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCAmelCase : Dict = array[temp_index - 1]
temp_index -= 1
lowerCAmelCase : Tuple = temp_index_value
return array
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> None: # Max Heap
'''simple docstring'''
lowerCAmelCase : List[Any] = index
lowerCAmelCase : int = 2 * index + 1 # Left Node
lowerCAmelCase : Dict = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCAmelCase : Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCAmelCase : Tuple = right_index
if largest != index:
lowerCAmelCase , lowerCAmelCase : Dict = array[largest], array[index]
heapify(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> list:
'''simple docstring'''
lowerCAmelCase : Dict = len(_UpperCAmelCase )
for i in range(n // 2, -1, -1 ):
heapify(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
for i in range(n - 1, 0, -1 ):
lowerCAmelCase , lowerCAmelCase : Dict = array[0], array[i]
heapify(_UpperCAmelCase, 0, _UpperCAmelCase )
return array
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : Tuple = low
lowerCAmelCase : Optional[Any] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCAmelCase , lowerCAmelCase : int = array[j], array[i]
i += 1
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> list:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return array
lowerCAmelCase : Optional[Any] = 2 * math.ceil(math.loga(len(_UpperCAmelCase ) ) )
lowerCAmelCase : Tuple = 16
return intro_sort(_UpperCAmelCase, 0, len(_UpperCAmelCase ), _UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> list:
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_UpperCAmelCase )
max_depth -= 1
lowerCAmelCase : List[str] = median_of_a(_UpperCAmelCase, _UpperCAmelCase, start + ((end - start) // 2) + 1, end - 1 )
lowerCAmelCase : Dict = partition(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
intro_sort(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : str = p
return insertion_sort(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Any = input('''Enter numbers separated by a comma : ''').strip()
__A : Optional[int] = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 323
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = 13
lowerCAmelCase : Union[str, Any] = 7
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : List[str] = 2
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[Any] = 37
lowerCAmelCase : List[Any] = 'gelu'
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Optional[Any] = 512
lowerCAmelCase : Dict = 16
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Union[str, Any] = 0.02
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Any = None
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[str] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = self.num_labels
lowerCAmelCase : Any = TFRoFormerForTokenClassification(config=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : int = False
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
lowerCAmelCase : List[Any] = TFRoFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0]
# TODO Replace vocab size
lowerCAmelCase : Any = 50000
lowerCAmelCase : str = [1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = tf.constant([[4, 10]] )
lowerCAmelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowerCAmelCase : int = emba(input_ids.shape )
lowerCAmelCase : str = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
lowerCAmelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowerCAmelCase : List[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
# 2,12,16,64
lowerCAmelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : List[str] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowerCAmelCase : List[Any] = embed_positions([2, 16, 768] )[None, None, :, :]
lowerCAmelCase , lowerCAmelCase : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
| 323
| 1
|
from collections import defaultdict
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> bool:
'''simple docstring'''
lowerCAmelCase : Any = first_str.lower().strip()
lowerCAmelCase : List[str] = second_str.lower().strip()
# Remove whitespace
lowerCAmelCase : Optional[Any] = first_str.replace(' ', '' )
lowerCAmelCase : str = second_str.replace(' ', '' )
# Strings of different lengths are not anagrams
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
# Default values for count should be 0
lowerCAmelCase : defaultdict[str, int] = defaultdict(_UpperCAmelCase )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(_UpperCAmelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__A : Any = input('''Enter the first string ''').strip()
__A : Union[str, Any] = input('''Enter the second string ''').strip()
__A : Any = check_anagrams(input_a, input_b)
print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 323
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 323
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : torch.FloatTensor
class __A ( nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[Any]=("DownEncoderBlock2D",) , UpperCAmelCase_ : Optional[int]=(64,) , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : int="silu" , UpperCAmelCase_ : Union[str, Any]=True , ):
super().__init__()
lowerCAmelCase : List[str] = layers_per_block
lowerCAmelCase : Dict = torch.nn.Convad(
UpperCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Any = nn.ModuleList([] )
# down
lowerCAmelCase : List[Any] = block_out_channels[0]
for i, down_block_type in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : List[str] = output_channel
lowerCAmelCase : int = block_out_channels[i]
lowerCAmelCase : str = i == len(UpperCAmelCase_ ) - 1
lowerCAmelCase : Any = get_down_block(
UpperCAmelCase_ , num_layers=self.layers_per_block , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
self.down_blocks.append(UpperCAmelCase_ )
# mid
lowerCAmelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# out
lowerCAmelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCAmelCase_ , eps=1E-6 )
lowerCAmelCase : Any = nn.SiLU()
lowerCAmelCase : List[Any] = 2 * out_channels if double_z else out_channels
lowerCAmelCase : Optional[int] = nn.Convad(block_out_channels[-1] , UpperCAmelCase_ , 3 , padding=1 )
lowerCAmelCase : Union[str, Any] = False
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int ):
lowerCAmelCase : Any = x
lowerCAmelCase : Dict = self.conv_in(UpperCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ : List[Any] ):
def custom_forward(*UpperCAmelCase_ : List[Any] ):
return module(*UpperCAmelCase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
lowerCAmelCase : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
# middle
lowerCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
else:
for down_block in self.down_blocks:
lowerCAmelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ )
# middle
lowerCAmelCase : Optional[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
lowerCAmelCase : str = down_block(UpperCAmelCase_ )
# middle
lowerCAmelCase : Tuple = self.mid_block(UpperCAmelCase_ )
# post-process
lowerCAmelCase : int = self.conv_norm_out(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = self.conv_act(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = self.conv_out(UpperCAmelCase_ )
return sample
class __A ( nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : str=("UpDecoderBlock2D",) , UpperCAmelCase_ : Any=(64,) , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : Optional[int]="silu" , UpperCAmelCase_ : Dict="group" , ):
super().__init__()
lowerCAmelCase : Tuple = layers_per_block
lowerCAmelCase : str = nn.Convad(
UpperCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowerCAmelCase : str = None
lowerCAmelCase : Dict = nn.ModuleList([] )
lowerCAmelCase : Optional[int] = in_channels if norm_type == 'spatial' else None
# mid
lowerCAmelCase : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# up
lowerCAmelCase : Any = list(reversed(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Union[str, Any] = output_channel
lowerCAmelCase : Dict = reversed_block_out_channels[i]
lowerCAmelCase : Any = i == len(UpperCAmelCase_ ) - 1
lowerCAmelCase : Tuple = get_up_block(
UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , resnet_time_scale_shift=UpperCAmelCase_ , )
self.up_blocks.append(UpperCAmelCase_ )
lowerCAmelCase : List[str] = output_channel
# out
if norm_type == "spatial":
lowerCAmelCase : str = SpatialNorm(block_out_channels[0] , UpperCAmelCase_ )
else:
lowerCAmelCase : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCAmelCase_ , eps=1E-6 )
lowerCAmelCase : int = nn.SiLU()
lowerCAmelCase : str = nn.Convad(block_out_channels[0] , UpperCAmelCase_ , 3 , padding=1 )
lowerCAmelCase : Union[str, Any] = False
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=None ):
lowerCAmelCase : Any = z
lowerCAmelCase : str = self.conv_in(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCAmelCase_ : Optional[Any] ):
def custom_forward(*UpperCAmelCase_ : Union[str, Any] ):
return module(*UpperCAmelCase_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
lowerCAmelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
lowerCAmelCase : Dict = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
lowerCAmelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
else:
# middle
lowerCAmelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
lowerCAmelCase : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ )
else:
# middle
lowerCAmelCase : str = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : int = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
lowerCAmelCase : int = up_block(UpperCAmelCase_ , UpperCAmelCase_ )
# post-process
if latent_embeds is None:
lowerCAmelCase : Dict = self.conv_norm_out(UpperCAmelCase_ )
else:
lowerCAmelCase : str = self.conv_norm_out(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = self.conv_act(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = self.conv_out(UpperCAmelCase_ )
return sample
class __A ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Dict="random" , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]=True ):
super().__init__()
lowerCAmelCase : Dict = n_e
lowerCAmelCase : Optional[int] = vq_embed_dim
lowerCAmelCase : Optional[Any] = beta
lowerCAmelCase : List[Any] = legacy
lowerCAmelCase : Union[str, Any] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowerCAmelCase : Optional[Any] = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
lowerCAmelCase : Any = self.used.shape[0]
lowerCAmelCase : Union[str, Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowerCAmelCase : List[Any] = self.re_embed
lowerCAmelCase : int = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
lowerCAmelCase : Any = n_e
lowerCAmelCase : str = sane_index_shape
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : List[str] ):
lowerCAmelCase : Union[str, Any] = inds.shape
assert len(UpperCAmelCase_ ) > 1
lowerCAmelCase : Optional[int] = inds.reshape(ishape[0] , -1 )
lowerCAmelCase : Optional[Any] = self.used.to(UpperCAmelCase_ )
lowerCAmelCase : Any = (inds[:, :, None] == used[None, None, ...]).long()
lowerCAmelCase : int = match.argmax(-1 )
lowerCAmelCase : List[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
lowerCAmelCase : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowerCAmelCase : List[str] = self.unknown_index
return new.reshape(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : str ):
lowerCAmelCase : Tuple = inds.shape
assert len(UpperCAmelCase_ ) > 1
lowerCAmelCase : Dict = inds.reshape(ishape[0] , -1 )
lowerCAmelCase : Optional[int] = self.used.to(UpperCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
lowerCAmelCase : List[Any] = 0 # simply set to zero
lowerCAmelCase : List[str] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCAmelCase_ )
return back.reshape(UpperCAmelCase_ )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowerCAmelCase : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowerCAmelCase : Tuple = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowerCAmelCase : List[str] = torch.argmin(torch.cdist(UpperCAmelCase_ , self.embedding.weight ) , dim=1 )
lowerCAmelCase : str = self.embedding(UpperCAmelCase_ ).view(z.shape )
lowerCAmelCase : Dict = None
lowerCAmelCase : Optional[Any] = None
# compute loss for embedding
if not self.legacy:
lowerCAmelCase : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowerCAmelCase : Optional[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowerCAmelCase : Dict = z + (z_q - z).detach()
# reshape back to match original input shape
lowerCAmelCase : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowerCAmelCase : Any = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowerCAmelCase : Optional[Any] = self.remap_to_used(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowerCAmelCase : Optional[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowercase__ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowerCAmelCase : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowerCAmelCase : Tuple = self.unmap_to_all(UpperCAmelCase_ )
lowerCAmelCase : Tuple = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowerCAmelCase : Any = self.embedding(UpperCAmelCase_ )
if shape is not None:
lowerCAmelCase : Optional[Any] = z_q.view(UpperCAmelCase_ )
# reshape back to match original input shape
lowerCAmelCase : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any]=False ):
lowerCAmelCase : Tuple = parameters
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = torch.chunk(UpperCAmelCase_ , 2 , dim=1 )
lowerCAmelCase : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowerCAmelCase : str = deterministic
lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * self.logvar )
lowerCAmelCase : Any = torch.exp(self.logvar )
if self.deterministic:
lowerCAmelCase : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowercase__ ( self : int , UpperCAmelCase_ : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowerCAmelCase : Dict = randn_tensor(
self.mean.shape , generator=UpperCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
lowerCAmelCase : Any = self.mean + self.std * sample
return x
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowerCAmelCase : int = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
return self.mean
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> list[int]:
'''simple docstring'''
if num <= 0:
lowerCAmelCase : List[str] = f"{num}: Invalid input, please enter a positive integer."
raise ValueError(_UpperCAmelCase )
lowerCAmelCase : Any = [True] * (num + 1)
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : List[str] = 2
lowerCAmelCase : List[str] = int(math.sqrt(_UpperCAmelCase ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(_UpperCAmelCase )
# Set multiples of start be False
for i in range(start * start, num + 1, _UpperCAmelCase ):
if sieve[i] is True:
lowerCAmelCase : str = False
start += 1
for j in range(end + 1, num + 1 ):
if sieve[j] is True:
prime.append(_UpperCAmelCase )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 323
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase_ : Optional[List[bool]]
lowerCAmelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 1
|
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return x + 2
class __A ( unittest.TestCase ):
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = 'x = 3'
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
lowerCAmelCase : Dict = 'x = y'
lowerCAmelCase : List[Any] = {'y': 5}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 'y = add_two(x)'
lowerCAmelCase : int = {'x': 3}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 'x = 3'
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5'
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\''
lowerCAmelCase : str = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} )
lowerCAmelCase : Any = {'x': 8}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = 'test_list = [x, add_two(x)]'
lowerCAmelCase : Optional[Any] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [3, 5] )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : int = 'y = x'
lowerCAmelCase : Optional[int] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowerCAmelCase : List[str] = {'x': 3}
lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowerCAmelCase : List[Any] = {'x': 3}
lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i'
lowerCAmelCase : str = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ )
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
| 323
| 1
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __A ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
lowerCAmelCase : List[Any] = logging.get_logger()
# the current default level is logging.WARNING
lowerCAmelCase : str = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(UpperCAmelCase_ )
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = logging.get_verbosity()
lowerCAmelCase : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
lowerCAmelCase : str = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(UpperCAmelCase_ ) as cl:
logger.warning(UpperCAmelCase_ )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(UpperCAmelCase_ ) as cl:
logger.warning(UpperCAmelCase_ )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(UpperCAmelCase_ ) as cl:
logger.warning(UpperCAmelCase_ )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(UpperCAmelCase_ )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def lowercase__ ( self : Any ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
lowerCAmelCase : Tuple = logging.get_logger('transformers.models.bart.tokenization_bart' )
lowerCAmelCase : Any = os.getenv('TRANSFORMERS_VERBOSITY' , UpperCAmelCase_ )
lowerCAmelCase : Any = logging.log_levels[env_level_str]
lowerCAmelCase : int = logging.get_verbosity()
self.assertEqual(
UpperCAmelCase_ , UpperCAmelCase_ , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
# restore to the original level
lowerCAmelCase : Tuple = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def lowercase__ ( self : int ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
lowerCAmelCase : Dict = logging.logging.getLogger()
with CaptureLogger(UpperCAmelCase_ ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def lowercase__ ( self : Optional[int] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
lowerCAmelCase : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
lowerCAmelCase : Tuple = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(UpperCAmelCase_ ) as cl:
logger.warning_advice(UpperCAmelCase_ )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(UpperCAmelCase_ ) as cl:
logger.warning_advice(UpperCAmelCase_ )
self.assertEqual(cl.out , msg + '\n' )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 323
|
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCAmelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_UpperCAmelCase, 2 ) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCAmelCase : Optional[Any] = (sidea + sidea + sidea) / 2
lowerCAmelCase : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
print(F'Reqular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 1
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __A :
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
lowerCAmelCase_ : Optional[str] = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
lowerCAmelCase_ : Optional[int] = field(default=2 , metadata={"help": "Batch size for training."} )
lowerCAmelCase_ : Optional[int] = field(default=2 , metadata={"help": "Batch size for evaluation."} )
lowerCAmelCase_ : Optional[float] = field(default=0.1 , metadata={"help": "Value of weight decay."} )
lowerCAmelCase_ : Optional[int] = field(
default=1_0000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
lowerCAmelCase_ : Optional[float] = field(default=2E-4 , metadata={"help": "Learning rate fo training."} )
lowerCAmelCase_ : Optional[str] = field(default="cosine" , metadata={"help": "Learning rate."} )
lowerCAmelCase_ : Optional[int] = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
lowerCAmelCase_ : Optional[int] = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
lowerCAmelCase_ : Optional[bool] = field(
default=lowerCAmelCase , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
lowerCAmelCase_ : Optional[int] = field(default=5_0000 , metadata={"help": "Maximum number of training steps."} )
lowerCAmelCase_ : Optional[int] = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
lowerCAmelCase_ : Optional[int] = field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
lowerCAmelCase_ : Optional[int] = field(default=1 , metadata={"help": "Training seed."} )
lowerCAmelCase_ : Optional[int] = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
lowerCAmelCase_ : Optional[bool] = field(default=lowerCAmelCase , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __A :
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
lowerCAmelCase_ : Optional[int] = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
lowerCAmelCase_ : Optional[int] = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
lowerCAmelCase_ : Optional[int] = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
lowerCAmelCase_ : Optional[int] = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __A :
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
lowerCAmelCase_ : Optional[int] = field(default=lowerCAmelCase , metadata={"help": "Number of workers used for code evaluation."} )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
lowerCAmelCase_ : Optional[bool] = field(
default=lowerCAmelCase , metadata={"help": "Sample from the language model's output distribution."} )
lowerCAmelCase_ : Optional[float] = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
lowerCAmelCase_ : Optional[int] = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
lowerCAmelCase_ : Optional[int] = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
lowerCAmelCase_ : Optional[float] = field(default=0.9_5 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
lowerCAmelCase_ : Optional[int] = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
lowerCAmelCase_ : Optional[int] = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
lowerCAmelCase_ : Optional[int] = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
lowerCAmelCase_ : Optional[str] = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
lowerCAmelCase_ : Optional[str] = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
lowerCAmelCase_ : Optional[int] = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __A :
lowerCAmelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
lowerCAmelCase_ : Optional[str] = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
lowerCAmelCase_ : Optional[int] = field(
default=10_0000 , metadata={"help": "Number of files to save per JSON output file."} )
lowerCAmelCase_ : Optional[str] = field(default="content" , metadata={"help": "Column containing text data to process."} )
lowerCAmelCase_ : Optional[float] = field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
lowerCAmelCase_ : Optional[float] = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
lowerCAmelCase_ : Optional[float] = field(
default=0.2_5 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
lowerCAmelCase_ : Optional[float] = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
lowerCAmelCase_ : Optional[float] = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
lowerCAmelCase_ : Optional[bool] = field(
default=lowerCAmelCase , metadata={"help": "If True, near-duplicate samples are removed."} )
lowerCAmelCase_ : Optional[float] = field(
default=0.8_5 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __A :
lowerCAmelCase_ : Optional[str] = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
lowerCAmelCase_ : Optional[str] = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
lowerCAmelCase_ : Optional[str] = field(default="content" , metadata={"help": "Column containing text data to process."} )
lowerCAmelCase_ : Optional[int] = field(default=20_0000 , metadata={"help": "Number of examples to train tokenizer on."} )
lowerCAmelCase_ : Optional[int] = field(
default=3_2768 , metadata={"help": "Number of examples to train the tokenizer on."} )
lowerCAmelCase_ : Optional[str] = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
lowerCAmelCase_ : Optional[bool] = field(default=lowerCAmelCase , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __A :
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
lowerCAmelCase_ : Optional[str] = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
lowerCAmelCase_ : Optional[int] = field(default=lowerCAmelCase , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __A :
lowerCAmelCase_ : Optional[str] = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
lowerCAmelCase_ : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
lowerCAmelCase_ : Optional[str] = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
lowerCAmelCase_ : Optional[bool] = field(default=lowerCAmelCase , metadata={"help": "Push saved tokenizer to the hub."} )
| 323
|
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__A : Optional[Any] = random.Random()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=1.0, _UpperCAmelCase=None, _UpperCAmelCase=None ) -> List[str]:
'''simple docstring'''
if rng is None:
lowerCAmelCase : Tuple = global_rng
lowerCAmelCase : Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __A ( unittest.TestCase ):
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Dict=400 , UpperCAmelCase_ : str=2000 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : str=24 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=16000 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=True , ):
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : List[str] = min_seq_length
lowerCAmelCase : Union[str, Any] = max_seq_length
lowerCAmelCase : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase : List[str] = feature_size
lowerCAmelCase : Union[str, Any] = num_mel_bins
lowerCAmelCase : Tuple = padding_value
lowerCAmelCase : Dict = sampling_rate
lowerCAmelCase : int = return_attention_mask
lowerCAmelCase : List[str] = do_normalize
def lowercase__ ( self : Any ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase__ ( self : Dict , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Tuple=False ):
def _flatten(UpperCAmelCase_ : Any ):
return list(itertools.chain(*UpperCAmelCase_ ) )
if equal_length:
lowerCAmelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase : Optional[int] = [np.asarray(UpperCAmelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = SpeechaTextFeatureExtractionTester(self )
def lowercase__ ( self : Dict , UpperCAmelCase_ : str ):
self.assertTrue(np.all(np.mean(UpperCAmelCase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ , axis=0 ) - 1 ) < 1E-3 ) )
def lowercase__ ( self : Dict ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCAmelCase : Optional[Any] = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase : int = feature_extractor(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCAmelCase : Any = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
lowerCAmelCase : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# Test batched
lowerCAmelCase : str = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
lowerCAmelCase : List[str] = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase : str = np.asarray(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
lowerCAmelCase : Any = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCAmelCase : Dict = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase : Optional[int] = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : List[Any] = feature_extractor(
UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ )
lowerCAmelCase : str = inputs.input_features
lowerCAmelCase : Tuple = inputs.attention_mask
lowerCAmelCase : Dict = [np.sum(UpperCAmelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase__ ( self : str ):
lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCAmelCase : Optional[Any] = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase : Union[str, Any] = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : Dict = feature_extractor(
UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = inputs.input_features
lowerCAmelCase : int = inputs.attention_mask
lowerCAmelCase : Union[str, Any] = [np.sum(UpperCAmelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCAmelCase : str = feature_extractor(
UpperCAmelCase_ , padding='max_length' , max_length=4 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Any = inputs.input_features
lowerCAmelCase : Dict = inputs.attention_mask
lowerCAmelCase : List[str] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCAmelCase : str = feature_extractor(
UpperCAmelCase_ , padding='longest' , max_length=4 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : str = inputs.input_features
lowerCAmelCase : Tuple = inputs.attention_mask
lowerCAmelCase : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCAmelCase : List[Any] = feature_extractor(
UpperCAmelCase_ , padding='longest' , max_length=16 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : List[Any] = inputs.input_features
lowerCAmelCase : List[str] = inputs.attention_mask
lowerCAmelCase : Optional[int] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def lowercase__ ( self : Tuple ):
import torch
lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : str = np.random.rand(100 , 32 ).astype(np.floataa )
lowerCAmelCase : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase : List[Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCAmelCase : List[Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[str] ):
from datasets import load_dataset
lowerCAmelCase : Union[str, Any] = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase : int = ds.sort('id' ).select(range(UpperCAmelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowercase__ ( self : Union[str, Any] ):
# fmt: off
lowerCAmelCase : int = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
] )
# fmt: on
lowerCAmelCase : List[Any] = self._load_datasamples(1 )
lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Union[str, Any] = feature_extractor(UpperCAmelCase_ , return_tensors='pt' ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , UpperCAmelCase_ , atol=1E-4 ) )
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 323
|
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 100 ) -> int:
'''simple docstring'''
lowerCAmelCase : Any = sum(i * i for i in range(1, n + 1 ) )
lowerCAmelCase : str = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
| 323
| 1
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : Tuple = emb.weight.shape
lowerCAmelCase : Tuple = nn.Linear(_UpperCAmelCase, _UpperCAmelCase, bias=_UpperCAmelCase )
lowerCAmelCase : Any = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = torch.load(_UpperCAmelCase, map_location='cpu' )
lowerCAmelCase : Optional[int] = Namespace(**checkpoint['cfg']['model'] )
lowerCAmelCase : Dict = checkpoint['model']
remove_ignore_keys_(_UpperCAmelCase )
lowerCAmelCase : str = state_dict['decoder.embed_tokens.weight'].shape[0]
lowerCAmelCase : Tuple = {key.replace('decoder', 'model' ): val for key, val in state_dict.items()}
lowerCAmelCase : Dict = XGLMConfig(
vocab_size=_UpperCAmelCase, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='gelu', scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
lowerCAmelCase : List[Any] = XGLMForCausalLM(_UpperCAmelCase )
lowerCAmelCase : List[Any] = model.load_state_dict(_UpperCAmelCase, strict=_UpperCAmelCase )
print(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__A : Optional[int] = parser.parse_args()
__A : List[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 323
|
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 0.0
for coeff in reversed(_UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = result * x + coeff
return result
if __name__ == "__main__":
__A : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
__A : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 323
| 1
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int = None , UpperCAmelCase_ : int = None ):
super().__init__()
lowerCAmelCase : int = pad_token_id
lowerCAmelCase : int = max_length
lowerCAmelCase : Optional[Any] = vocab
lowerCAmelCase : List[Any] = merges
lowerCAmelCase : Tuple = BytePairTokenizer(UpperCAmelCase_ , UpperCAmelCase_ , sequence_length=UpperCAmelCase_ )
@classmethod
def lowercase__ ( cls : Any , UpperCAmelCase_ : GPTaTokenizer , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : List[Any] = [' '.join(UpperCAmelCase_ ) for m in tokenizer.bpe_ranks.keys()]
lowerCAmelCase : Union[str, Any] = tokenizer.get_vocab()
return cls(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
@classmethod
def lowercase__ ( cls : Any , UpperCAmelCase_ : Union[str, os.PathLike] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = GPTaTokenizer.from_pretrained(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
return cls.from_tokenizer(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
@classmethod
def lowercase__ ( cls : str , UpperCAmelCase_ : int ):
return cls(**UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int = None ):
lowerCAmelCase : Union[str, Any] = self.tf_tokenizer(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tf.ones_like(UpperCAmelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCAmelCase : Any = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCAmelCase , lowerCAmelCase : Tuple = pad_model_inputs(
UpperCAmelCase_ , max_seq_length=UpperCAmelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=18 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , ):
lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase : str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : Any = max_resolution
lowerCAmelCase : int = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : str = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
def lowercase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323
| 1
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : torch.FloatTensor
class __A ( lowerCAmelCase , lowerCAmelCase ):
@register_to_config
def __init__( self : List[Any] , UpperCAmelCase_ : int = 16 , UpperCAmelCase_ : int = 88 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "geglu" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ):
super().__init__()
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : Optional[Any] = attention_head_dim
lowerCAmelCase : int = num_attention_heads * attention_head_dim
lowerCAmelCase : List[Any] = in_channels
lowerCAmelCase : int = torch.nn.GroupNorm(num_groups=UpperCAmelCase_ , num_channels=UpperCAmelCase_ , eps=1E-6 , affine=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
# 3. Define transformers blocks
lowerCAmelCase : Any = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , dropout=UpperCAmelCase_ , cross_attention_dim=UpperCAmelCase_ , activation_fn=UpperCAmelCase_ , attention_bias=UpperCAmelCase_ , double_self_attention=UpperCAmelCase_ , norm_elementwise_affine=UpperCAmelCase_ , )
for d in range(UpperCAmelCase_ )
] )
lowerCAmelCase : List[Any] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : bool = True , ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = hidden_states.shape
lowerCAmelCase : int = batch_frames // num_frames
lowerCAmelCase : Optional[Any] = hidden_states
lowerCAmelCase : int = hidden_states[None, :].reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : int = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowerCAmelCase : List[str] = self.norm(UpperCAmelCase_ )
lowerCAmelCase : Any = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = self.proj_in(UpperCAmelCase_ )
# 2. Blocks
for block in self.transformer_blocks:
lowerCAmelCase : int = block(
UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , timestep=UpperCAmelCase_ , cross_attention_kwargs=UpperCAmelCase_ , class_labels=UpperCAmelCase_ , )
# 3. Output
lowerCAmelCase : List[str] = self.proj_out(UpperCAmelCase_ )
lowerCAmelCase : str = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowerCAmelCase : Dict = hidden_states.reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[str] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase_ )
| 323
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[Any] = ''
else:
lowerCAmelCase : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : List[str] = dct.pop(_UpperCAmelCase )
lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : str = ViTMSNConfig()
lowerCAmelCase : str = 1_000
lowerCAmelCase : List[str] = 'datasets/huggingface/label-files'
lowerCAmelCase : int = 'imagenet-1k-id2label.json'
lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase ), 'r' ) )
lowerCAmelCase : Optional[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[Any] = 384
lowerCAmelCase : List[Any] = 1_536
lowerCAmelCase : Union[str, Any] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase : List[Any] = 1_024
lowerCAmelCase : Any = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : Any = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase : Any = 4
elif "l7" in checkpoint_url:
lowerCAmelCase : int = 7
lowerCAmelCase : str = 1_024
lowerCAmelCase : Tuple = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Tuple = 16
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : List[str] = ViTMSNModel(_UpperCAmelCase )
lowerCAmelCase : int = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu' )['target_encoder']
lowerCAmelCase : int = ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCAmelCase )
lowerCAmelCase : Tuple = create_rename_keys(_UpperCAmelCase, base_model=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase, base_model=_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase : Dict = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
lowerCAmelCase : Any = ViTImageProcessor(
size=config.image_size, image_mean=_UpperCAmelCase, image_std=_UpperCAmelCase )
lowerCAmelCase : List[Any] = image_processor(images=_UpperCAmelCase, return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = "dinat"
lowerCAmelCase_ : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : Dict=[2, 4, 8, 16] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase_ : int=3.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : str = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = len(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = num_heads
lowerCAmelCase : Tuple = kernel_size
lowerCAmelCase : List[str] = dilations
lowerCAmelCase : Any = mlp_ratio
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase : int = layer_scale_init_value
lowerCAmelCase : Optional[Any] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 323
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError('String lengths must match!' )
lowerCAmelCase : Tuple = 0
for chara, chara in zip(_UpperCAmelCase, _UpperCAmelCase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if "cls_token" in name:
lowerCAmelCase : str = name.replace('cls_token', 'vit.embeddings.cls_token' )
if "mask_token" in name:
lowerCAmelCase : str = name.replace('mask_token', 'decoder.mask_token' )
if "decoder_pos_embed" in name:
lowerCAmelCase : List[str] = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
lowerCAmelCase : Dict = name.replace('pos_embed', 'vit.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowerCAmelCase : Union[str, Any] = name.replace('patch_embed.proj', 'vit.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCAmelCase : Union[str, Any] = name.replace('patch_embed.norm', 'vit.embeddings.norm' )
if "decoder_blocks" in name:
lowerCAmelCase : Dict = name.replace('decoder_blocks', 'decoder.decoder_layers' )
if "blocks" in name:
lowerCAmelCase : Tuple = name.replace('blocks', 'vit.encoder.layer' )
if "attn.proj" in name:
lowerCAmelCase : Union[str, Any] = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
lowerCAmelCase : Optional[Any] = name.replace('attn', 'attention.self' )
if "norm1" in name:
lowerCAmelCase : Any = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase : Dict = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
lowerCAmelCase : Tuple = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase : Union[str, Any] = name.replace('mlp.fc2', 'output.dense' )
if "decoder_embed" in name:
lowerCAmelCase : Dict = name.replace('decoder_embed', 'decoder.decoder_embed' )
if "decoder_norm" in name:
lowerCAmelCase : Tuple = name.replace('decoder_norm', 'decoder.decoder_norm' )
if "decoder_pred" in name:
lowerCAmelCase : Optional[Any] = name.replace('decoder_pred', 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name:
lowerCAmelCase : List[Any] = name.replace('norm.weight', 'vit.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name:
lowerCAmelCase : Optional[Any] = name.replace('norm.bias', 'vit.layernorm.bias' )
return name
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase : Union[str, Any] = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
lowerCAmelCase : Any = key.split('.' )
lowerCAmelCase : Optional[Any] = int(key_split[1] )
if "decoder_blocks" in key:
lowerCAmelCase : Optional[int] = config.decoder_hidden_size
lowerCAmelCase : str = 'decoder.decoder_layers.'
if "weight" in key:
lowerCAmelCase : Tuple = val[:dim, :]
lowerCAmelCase : Dict = val[dim : dim * 2, :]
lowerCAmelCase : Optional[int] = val[-dim:, :]
elif "bias" in key:
lowerCAmelCase : List[Any] = val[:dim]
lowerCAmelCase : Dict = val[dim : dim * 2]
lowerCAmelCase : Any = val[-dim:]
else:
lowerCAmelCase : Optional[int] = config.hidden_size
lowerCAmelCase : int = 'vit.encoder.layer.'
if "weight" in key:
lowerCAmelCase : Tuple = val[:dim, :]
lowerCAmelCase : List[Any] = val[dim : dim * 2, :]
lowerCAmelCase : List[str] = val[-dim:, :]
elif "bias" in key:
lowerCAmelCase : List[Any] = val[:dim]
lowerCAmelCase : Tuple = val[dim : dim * 2]
lowerCAmelCase : List[str] = val[-dim:]
else:
lowerCAmelCase : int = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[int] = ViTMAEConfig()
if "large" in checkpoint_url:
lowerCAmelCase : int = 1_024
lowerCAmelCase : Any = 4_096
lowerCAmelCase : Dict = 24
lowerCAmelCase : str = 16
elif "huge" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = 14
lowerCAmelCase : str = 1_280
lowerCAmelCase : int = 5_120
lowerCAmelCase : Any = 32
lowerCAmelCase : Union[str, Any] = 16
lowerCAmelCase : Optional[int] = ViTMAEForPreTraining(_UpperCAmelCase )
lowerCAmelCase : List[str] = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu' )['model']
lowerCAmelCase : Optional[Any] = ViTMAEImageProcessor(size=config.image_size )
lowerCAmelCase : List[Any] = convert_state_dict(_UpperCAmelCase, _UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase : List[Any] = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
lowerCAmelCase : int = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
lowerCAmelCase : Union[str, Any] = ViTMAEImageProcessor(size=config.image_size )
lowerCAmelCase : int = image_processor(images=_UpperCAmelCase, return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase : Optional[int] = model(**_UpperCAmelCase )
lowerCAmelCase : int = outputs.logits
if "large" in checkpoint_url:
lowerCAmelCase : str = torch.tensor(
[[-0.7_3_0_9, -0.7_1_2_8, -1.0_1_6_9], [-1.0_1_6_1, -0.9_0_5_8, -1.1_8_7_8], [-1.0_4_7_8, -0.9_4_1_1, -1.1_9_1_1]] )
elif "huge" in checkpoint_url:
lowerCAmelCase : Tuple = torch.tensor(
[[-1.1_5_9_9, -0.9_1_9_9, -1.2_2_2_1], [-1.1_9_5_2, -0.9_2_6_9, -1.2_3_0_7], [-1.2_1_4_3, -0.9_3_3_7, -1.2_2_6_2]] )
else:
lowerCAmelCase : List[Any] = torch.tensor(
[[-0.9_1_9_2, -0.8_4_8_1, -1.1_2_5_9], [-1.1_3_4_9, -1.0_0_3_4, -1.2_5_9_9], [-1.1_7_5_7, -1.0_4_2_9, -1.2_7_2_6]] )
# verify logits
assert torch.allclose(logits[0, :3, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : Tuple = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A : List[Any] = trt.Logger(trt.Logger.WARNING)
__A : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A : List[Any] = logging.getLogger(__name__)
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__A : List[str] = parser.parse_args()
if args.tokenizer_name:
__A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__A : List[Any] = args.per_device_eval_batch_size
__A : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A : Any = True
__A : Union[str, Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__A : List[str] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__A : Dict = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A : str = [network.get_input(i) for i in range(network.num_inputs)]
__A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__A : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.intaa )
lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.intaa )
lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase )
# start time
lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase : List[str] = time.time()
lowerCAmelCase : Tuple = end_time - start_time
lowerCAmelCase : Union[str, Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__A : int = raw_datasets['''validation'''].column_names
__A : int = '''question''' if '''question''' in column_names else column_names[0]
__A : List[str] = '''context''' if '''context''' in column_names else column_names[1]
__A : int = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A : str = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
__A : int = raw_datasets['''validation''']
# Validation Feature Creation
__A : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__A : List[str] = default_data_collator
__A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__A : Union[str, Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
__A , __A : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__A , __A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 323
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = 'ylacombe/bark-small'
lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase : int = 'en_speaker_1'
lowerCAmelCase : Union[str, Any] = 'This is a test string'
lowerCAmelCase : Any = 'speaker_embeddings_path.json'
lowerCAmelCase : Any = 'speaker_embeddings'
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = self.get_tokenizer()
lowerCAmelCase : Dict = BarkProcessor(tokenizer=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCAmelCase : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Dict = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCAmelCase : Dict = 35
lowerCAmelCase : Any = 2
lowerCAmelCase : str = 8
lowerCAmelCase : Optional[int] = {
'semantic_prompt': np.ones(UpperCAmelCase_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCAmelCase : List[str] = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
lowerCAmelCase : Any = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Tuple = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
lowerCAmelCase : Tuple = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCAmelCase : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.get_tokenizer()
lowerCAmelCase : Optional[int] = BarkProcessor(tokenizer=UpperCAmelCase_ )
lowerCAmelCase : int = processor(text=self.input_string )
lowerCAmelCase : str = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 323
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = "dinat"
lowerCAmelCase_ : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : Dict=[2, 4, 8, 16] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase_ : int=3.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : str = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = len(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = num_heads
lowerCAmelCase : Tuple = kernel_size
lowerCAmelCase : List[str] = dilations
lowerCAmelCase : Any = mlp_ratio
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase : int = layer_scale_init_value
lowerCAmelCase : Optional[Any] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 323
| 1
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = 13
lowerCAmelCase : Union[str, Any] = 7
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : List[str] = 2
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[Any] = 37
lowerCAmelCase : List[Any] = 'gelu'
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Optional[Any] = 512
lowerCAmelCase : Dict = 16
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Union[str, Any] = 0.02
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Any = None
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[str] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = self.num_labels
lowerCAmelCase : Any = TFRoFormerForTokenClassification(config=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : int = False
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
lowerCAmelCase : List[Any] = TFRoFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0]
# TODO Replace vocab size
lowerCAmelCase : Any = 50000
lowerCAmelCase : str = [1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = tf.constant([[4, 10]] )
lowerCAmelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowerCAmelCase : int = emba(input_ids.shape )
lowerCAmelCase : str = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
lowerCAmelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowerCAmelCase : List[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
# 2,12,16,64
lowerCAmelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : List[str] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowerCAmelCase : List[Any] = embed_positions([2, 16, 768] )[None, None, :, :]
lowerCAmelCase , lowerCAmelCase : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
| 323
|
from manim import *
class __A ( lowerCAmelCase ):
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
| 323
| 1
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __A :
lowerCAmelCase_ : int
lowerCAmelCase_ : TreeNode | None = None
lowerCAmelCase_ : TreeNode | None = None
__A : Any = namedtuple('''CoinsDistribResult''', '''moves excess''')
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_UpperCAmelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_UpperCAmelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_UpperCAmelCase ) != count_coins(_UpperCAmelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(_UpperCAmelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0, 1 )
lowerCAmelCase , lowerCAmelCase : List[str] = get_distrib(node.left )
lowerCAmelCase , lowerCAmelCase : Optional[int] = get_distrib(node.right )
lowerCAmelCase : Tuple = 1 - left_distrib_excess
lowerCAmelCase : List[str] = 1 - right_distrib_excess
lowerCAmelCase : List[Any] = (
left_distrib_moves
+ right_distrib_moves
+ abs(_UpperCAmelCase )
+ abs(_UpperCAmelCase )
)
lowerCAmelCase : int = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_UpperCAmelCase, _UpperCAmelCase )
return get_distrib(_UpperCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__A : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class __A ( lowerCAmelCase ):
def __init__( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any]=768 ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = proj_size
lowerCAmelCase : int = CLIPVisionModel(UpperCAmelCase_ )
lowerCAmelCase : Any = PaintByExampleMapper(UpperCAmelCase_ )
lowerCAmelCase : Any = nn.LayerNorm(config.hidden_size )
lowerCAmelCase : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
lowerCAmelCase : Optional[Any] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=False ):
lowerCAmelCase : Optional[int] = self.model(pixel_values=UpperCAmelCase_ )
lowerCAmelCase : int = clip_output.pooler_output
lowerCAmelCase : Any = self.mapper(latent_states[:, None] )
lowerCAmelCase : Any = self.final_layer_norm(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = self.proj_out(UpperCAmelCase_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class __A ( nn.Module ):
def __init__( self : str , UpperCAmelCase_ : List[Any] ):
super().__init__()
lowerCAmelCase : int = (config.num_hidden_layers + 1) // 5
lowerCAmelCase : Union[str, Any] = config.hidden_size
lowerCAmelCase : List[str] = 1
lowerCAmelCase : Any = nn.ModuleList(
[
BasicTransformerBlock(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , activation_fn='gelu' , attention_bias=UpperCAmelCase_ )
for _ in range(UpperCAmelCase_ )
] )
def lowercase__ ( self : str , UpperCAmelCase_ : int ):
for block in self.blocks:
lowerCAmelCase : int = block(UpperCAmelCase_ )
return hidden_states
| 323
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A : Dict = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A : List[str] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A : Any = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__A : Tuple = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__A : Any = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__A : List[str] = tf.keras.preprocessing.image.img_to_array(test_image)
__A : Optional[Any] = np.expand_dims(test_image, axis=0)
__A : int = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__A : Optional[int] = '''Normal'''
if result[0][0] == 1:
__A : str = '''Abnormality detected'''
| 323
| 1
|
from ..utils import DummyObject, requires_backends
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[str] = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Dict = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Dict = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[str] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[str] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[int] = ["torch"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[str] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Dict = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : str = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Dict = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : str = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[int] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : str = ["torch"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
| 323
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : str = logging.getLogger(__name__)
class __A ( lowerCAmelCase ):
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None ):
lowerCAmelCase : List[Any] = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] )
lowerCAmelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = BertEncoderWithPabee(UpperCAmelCase_ )
self.init_weights()
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
lowerCAmelCase : int = threshold
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = patience
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(UpperCAmelCase_ )
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
if token_type_ids is None:
lowerCAmelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : Optional[int] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.invert_attention_mask(UpperCAmelCase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : int = self.embeddings(
input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ )
lowerCAmelCase : List[str] = embedding_output
if self.training:
lowerCAmelCase : Tuple = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Dict = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(UpperCAmelCase_ ) )
res.append(UpperCAmelCase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Union[str, Any] = self.encoder(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )]
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = output_layers[i](UpperCAmelCase_ )
if regression:
lowerCAmelCase : List[str] = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Any = 0
else:
lowerCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[Any] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Dict = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : int = BertModelWithPabee(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None , ):
lowerCAmelCase : int = self.bert(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCAmelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
| 323
| 1
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__A : List[Any] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__A : List[Any] = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ), dtype=_UpperCAmelCase )[0]
@deprecated(_UpperCAmelCase, 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
print('Extracting', f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
lowerCAmelCase : Tuple = _readaa(_UpperCAmelCase )
if magic != 2_051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
lowerCAmelCase : Optional[int] = _readaa(_UpperCAmelCase )
lowerCAmelCase : List[str] = _readaa(_UpperCAmelCase )
lowerCAmelCase : int = _readaa(_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = bytestream.read(rows * cols * num_images )
lowerCAmelCase : Tuple = numpy.frombuffer(_UpperCAmelCase, dtype=numpy.uinta )
lowerCAmelCase : str = data.reshape(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, 1 )
return data
@deprecated(_UpperCAmelCase, 'Please use tf.one_hot on tensors.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : int = labels_dense.shape[0]
lowerCAmelCase : Union[str, Any] = numpy.arange(_UpperCAmelCase ) * num_classes
lowerCAmelCase : List[str] = numpy.zeros((num_labels, num_classes) )
lowerCAmelCase : Optional[int] = 1
return labels_one_hot
@deprecated(_UpperCAmelCase, 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=10 ) -> Union[str, Any]:
'''simple docstring'''
print('Extracting', f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
lowerCAmelCase : List[Any] = _readaa(_UpperCAmelCase )
if magic != 2_049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
lowerCAmelCase : Any = _readaa(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = bytestream.read(_UpperCAmelCase )
lowerCAmelCase : int = numpy.frombuffer(_UpperCAmelCase, dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_UpperCAmelCase, _UpperCAmelCase )
return labels
class __A :
@deprecated(
UpperCAmelCase_ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=dtypes.floataa , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[Any]=None , ):
lowerCAmelCase , lowerCAmelCase : Dict = random_seed.get_seed(UpperCAmelCase_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCAmelCase : Tuple = dtypes.as_dtype(UpperCAmelCase_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
lowerCAmelCase : str = 10000
lowerCAmelCase : List[str] = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"images.shape: {images.shape} labels.shape: {labels.shape}"
lowerCAmelCase : Any = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCAmelCase : Any = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCAmelCase : Optional[Any] = images.astype(numpy.floataa )
lowerCAmelCase : Tuple = numpy.multiply(UpperCAmelCase_ , 1.0 / 2_55.0 )
lowerCAmelCase : Optional[Any] = images
lowerCAmelCase : Union[str, Any] = labels
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Optional[Any] = 0
@property
def lowercase__ ( self : Tuple ):
return self._images
@property
def lowercase__ ( self : List[str] ):
return self._labels
@property
def lowercase__ ( self : Optional[int] ):
return self._num_examples
@property
def lowercase__ ( self : int ):
return self._epochs_completed
def lowercase__ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=True ):
if fake_data:
lowerCAmelCase : Any = [1] * 784
lowerCAmelCase : List[Any] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(UpperCAmelCase_ )],
[fake_label for _ in range(UpperCAmelCase_ )],
)
lowerCAmelCase : Any = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCAmelCase : Optional[Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.images[perma]
lowerCAmelCase : Union[str, Any] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCAmelCase : Tuple = self._num_examples - start
lowerCAmelCase : Dict = self._images[start : self._num_examples]
lowerCAmelCase : Optional[Any] = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCAmelCase : Optional[Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCAmelCase_ )
lowerCAmelCase : int = self.images[perm]
lowerCAmelCase : Tuple = self.labels[perm]
# Start next epoch
lowerCAmelCase : List[str] = 0
lowerCAmelCase : int = batch_size - rest_num_examples
lowerCAmelCase : str = self._index_in_epoch
lowerCAmelCase : Optional[Any] = self._images[start:end]
lowerCAmelCase : Optional[Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCAmelCase : List[str] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_UpperCAmelCase, 'Please write your own downloading logic.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if not gfile.Exists(_UpperCAmelCase ):
gfile.MakeDirs(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
if not gfile.Exists(_UpperCAmelCase ):
urllib.request.urlretrieve(_UpperCAmelCase, _UpperCAmelCase ) # noqa: S310
with gfile.GFile(_UpperCAmelCase ) as f:
lowerCAmelCase : str = f.size()
print('Successfully downloaded', _UpperCAmelCase, _UpperCAmelCase, 'bytes.' )
return filepath
@deprecated(
_UpperCAmelCase, 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=dtypes.floataa, _UpperCAmelCase=True, _UpperCAmelCase=5_000, _UpperCAmelCase=None, _UpperCAmelCase=DEFAULT_SOURCE_URL, ) -> int:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[], [], fake_data=_UpperCAmelCase, one_hot=_UpperCAmelCase, dtype=_UpperCAmelCase, seed=_UpperCAmelCase )
lowerCAmelCase : str = fake()
lowerCAmelCase : Dict = fake()
lowerCAmelCase : List[Any] = fake()
return _Datasets(train=_UpperCAmelCase, validation=_UpperCAmelCase, test=_UpperCAmelCase )
if not source_url: # empty string check
lowerCAmelCase : Optional[int] = DEFAULT_SOURCE_URL
lowerCAmelCase : Any = 'train-images-idx3-ubyte.gz'
lowerCAmelCase : int = 'train-labels-idx1-ubyte.gz'
lowerCAmelCase : int = 't10k-images-idx3-ubyte.gz'
lowerCAmelCase : str = 't10k-labels-idx1-ubyte.gz'
lowerCAmelCase : Optional[int] = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + train_images_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : Optional[int] = _extract_images(_UpperCAmelCase )
lowerCAmelCase : Any = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + train_labels_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : int = _extract_labels(_UpperCAmelCase, one_hot=_UpperCAmelCase )
lowerCAmelCase : Tuple = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + test_images_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : Tuple = _extract_images(_UpperCAmelCase )
lowerCAmelCase : Any = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + test_labels_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : Tuple = _extract_labels(_UpperCAmelCase, one_hot=_UpperCAmelCase )
if not 0 <= validation_size <= len(_UpperCAmelCase ):
lowerCAmelCase : int = (
'Validation size should be between 0 and '
f"{len(_UpperCAmelCase )}. Received: {validation_size}."
)
raise ValueError(_UpperCAmelCase )
lowerCAmelCase : int = train_images[:validation_size]
lowerCAmelCase : Tuple = train_labels[:validation_size]
lowerCAmelCase : int = train_images[validation_size:]
lowerCAmelCase : Tuple = train_labels[validation_size:]
lowerCAmelCase : List[Any] = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
lowerCAmelCase : Optional[Any] = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : Any = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
return _Datasets(train=_UpperCAmelCase, validation=_UpperCAmelCase, test=_UpperCAmelCase )
| 323
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "deberta-v2"
def __init__( self : int , UpperCAmelCase_ : Dict=128100 , UpperCAmelCase_ : Optional[int]=1536 , UpperCAmelCase_ : Tuple=24 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Any=6144 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-7 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : int="gelu" , **UpperCAmelCase_ : int , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Union[str, Any] = relative_attention
lowerCAmelCase : List[Any] = max_relative_positions
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase_ ) == str:
lowerCAmelCase : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCAmelCase : str = pos_att_type
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : str = kwargs.get('pooler_hidden_size' , UpperCAmelCase_ )
lowerCAmelCase : Tuple = pooler_dropout
lowerCAmelCase : Union[str, Any] = pooler_hidden_act
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase__ ( self : int ):
return 12
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : "PreTrainedTokenizerBase" = None , ):
lowerCAmelCase : List[str] = super().generate_dummy_inputs(preprocessor=UpperCAmelCase_ , framework=UpperCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 323
| 1
|
import baseaa
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode('utf-8' ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
return baseaa.baadecode(_UpperCAmelCase ).decode('utf-8' )
if __name__ == "__main__":
__A : Any = '''Hello World!'''
__A : Dict = baseaa_encode(test)
print(encoded)
__A : List[Any] = baseaa_decode(encoded)
print(decoded)
| 323
|
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 323
| 1
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError('String lengths must match!' )
lowerCAmelCase : Tuple = 0
for chara, chara in zip(_UpperCAmelCase, _UpperCAmelCase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__A : Any = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class __A ( unittest.TestCase , lowerCAmelCase ):
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = load_tool('text-question-answering' )
self.tool.setup()
lowerCAmelCase : Any = load_tool('text-question-answering' , remote=UpperCAmelCase_ )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.tool(UpperCAmelCase_ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase_ , 'launched the BigScience Research Workshop' )
def lowercase__ ( self : str ):
lowerCAmelCase : Any = self.remote_tool(UpperCAmelCase_ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase_ , 'launched the BigScience Research Workshop' )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = self.tool(text=UpperCAmelCase_ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase_ , 'launched the BigScience Research Workshop' )
def lowercase__ ( self : Any ):
lowerCAmelCase : Dict = self.remote_tool(text=UpperCAmelCase_ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase_ , 'launched the BigScience Research Workshop' )
| 323
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = 13
lowerCAmelCase : Union[str, Any] = 7
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : List[str] = 2
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[Any] = 37
lowerCAmelCase : List[Any] = 'gelu'
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Optional[Any] = 512
lowerCAmelCase : Dict = 16
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Union[str, Any] = 0.02
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Any = None
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[str] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = self.num_labels
lowerCAmelCase : Any = TFRoFormerForTokenClassification(config=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : int = False
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
lowerCAmelCase : List[Any] = TFRoFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0]
# TODO Replace vocab size
lowerCAmelCase : Any = 50000
lowerCAmelCase : str = [1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = tf.constant([[4, 10]] )
lowerCAmelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowerCAmelCase : int = emba(input_ids.shape )
lowerCAmelCase : str = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
lowerCAmelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowerCAmelCase : List[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
# 2,12,16,64
lowerCAmelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : List[str] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowerCAmelCase : List[Any] = embed_positions([2, 16, 768] )[None, None, :, :]
lowerCAmelCase , lowerCAmelCase : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
| 323
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__A : Tuple = (3, 9, -11, 0, 7, 5, 1, -1)
__A : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __A :
lowerCAmelCase_ : int
lowerCAmelCase_ : Node | None
class __A :
def __init__( self : int , UpperCAmelCase_ : Iterable[int] ):
lowerCAmelCase : Node | None = None
for i in sorted(UpperCAmelCase_ , reverse=UpperCAmelCase_ ):
lowerCAmelCase : Dict = Node(UpperCAmelCase_ , self.head )
def __iter__( self : Optional[int] ):
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : int = node.next_node
def __len__( self : Dict ):
return sum(1 for _ in self )
def __str__( self : int ):
return " -> ".join([str(UpperCAmelCase_ ) for node in self] )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(_UpperCAmelCase ) + list(_UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 323
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 323
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Optional[int] = CustomTokenizer
pass
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = "ibert"
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any]=30522 , UpperCAmelCase_ : Optional[int]=768 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : Optional[int]=12 , UpperCAmelCase_ : int=3072 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Any=1E-12 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]="absolute" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : int="none" , **UpperCAmelCase_ : Dict , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : str = intermediate_size
lowerCAmelCase : Any = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : List[Any] = type_vocab_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : str = position_embedding_type
lowerCAmelCase : Any = quant_mode
lowerCAmelCase : Union[str, Any] = force_dequant
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Tuple ):
if self.task == "multiple-choice":
lowerCAmelCase : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 323
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase_ : Optional[List[bool]]
lowerCAmelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase )
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCAmelCase_ : ClassVar[Features] = Features({"audio": Audio()} )
lowerCAmelCase_ : ClassVar[Features] = Features({"labels": ClassLabel} )
lowerCAmelCase_ : str = "audio"
lowerCAmelCase_ : str = "labels"
def lowercase__ ( self : Dict , UpperCAmelCase_ : List[str] ):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , UpperCAmelCase_ ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
lowerCAmelCase : Optional[int] = copy.deepcopy(self )
lowerCAmelCase : int = self.label_schema.copy()
lowerCAmelCase : Any = features[self.label_column]
lowerCAmelCase : Union[str, Any] = label_schema
return task_template
@property
def lowercase__ ( self : Optional[int] ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 323
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return x + 2
class __A ( unittest.TestCase ):
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = 'x = 3'
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
lowerCAmelCase : Dict = 'x = y'
lowerCAmelCase : List[Any] = {'y': 5}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 'y = add_two(x)'
lowerCAmelCase : int = {'x': 3}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 'x = 3'
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5'
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\''
lowerCAmelCase : str = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} )
lowerCAmelCase : Any = {'x': 8}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = 'test_list = [x, add_two(x)]'
lowerCAmelCase : Optional[Any] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [3, 5] )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : int = 'y = x'
lowerCAmelCase : Optional[int] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowerCAmelCase : List[str] = {'x': 3}
lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowerCAmelCase : List[Any] = {'x': 3}
lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i'
lowerCAmelCase : str = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ )
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
| 323
| 1
|
import heapq
import sys
import numpy as np
__A : int = tuple[int, int]
class __A :
def __init__( self : Optional[int] ):
lowerCAmelCase : Tuple = []
lowerCAmelCase : List[Any] = set()
def lowercase__ ( self : Dict ):
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def lowercase__ ( self : str ):
return len(self.elements ) == 0
def lowercase__ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(UpperCAmelCase_ )
else:
# update
# print("update", item)
lowerCAmelCase : Any = []
((lowerCAmelCase) , (lowerCAmelCase)) : List[str] = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowerCAmelCase) , (lowerCAmelCase)) : Optional[Any] = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
if item in self.set:
self.set.remove(UpperCAmelCase_ )
lowerCAmelCase : Any = []
((lowerCAmelCase) , (lowerCAmelCase)) : str = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowerCAmelCase) , (lowerCAmelCase)) : List[str] = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def lowercase__ ( self : Optional[Any] ):
return self.elements[0][1]
def lowercase__ ( self : int ):
((lowerCAmelCase) , (lowerCAmelCase)) : List[str] = heapq.heappop(self.elements )
self.set.remove(UpperCAmelCase_ )
return (priority, item)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.array(_UpperCAmelCase )
lowerCAmelCase : Any = np.array(_UpperCAmelCase )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return consistent_heuristic(_UpperCAmelCase, _UpperCAmelCase ) // t
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : List[str] = g_function[start] + Wa * heuristics[i](_UpperCAmelCase, _UpperCAmelCase )
return ans
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Any = np.chararray((n, n) )
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
lowerCAmelCase : Optional[Any] = '*'
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (j, (n - 1) - i) in blocks:
lowerCAmelCase : Tuple = '#'
lowerCAmelCase : List[Any] = '-'
lowerCAmelCase : Optional[Any] = back_pointer[goal]
while x != start:
((lowerCAmelCase) , (lowerCAmelCase)) : Tuple = x
# print(x)
lowerCAmelCase : Tuple = '-'
lowerCAmelCase : str = back_pointer[x]
lowerCAmelCase : Optional[int] = '-'
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j], end=' ' )
print('<-- End position', end=' ' )
else:
print(grid[i][j], end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowerCAmelCase : Dict = back_pointer[goal]
while x != start:
print(_UpperCAmelCase, end=' ' )
lowerCAmelCase : List[Any] = back_pointer[x]
print(_UpperCAmelCase )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ) -> int:
'''simple docstring'''
for itera in range(_UpperCAmelCase ):
open_list[itera].remove_element(_UpperCAmelCase )
# print("s", s)
# print("j", j)
((lowerCAmelCase) , (lowerCAmelCase)) : Union[str, Any] = s
lowerCAmelCase : List[Any] = (x - 1, y)
lowerCAmelCase : Optional[Any] = (x + 1, y)
lowerCAmelCase : Optional[int] = (x, y + 1)
lowerCAmelCase : str = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCAmelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = -1
lowerCAmelCase : Optional[int] = float('inf' )
if valid(_UpperCAmelCase ) and g_function[neighbours] > g_function[s] + 1:
lowerCAmelCase : Any = g_function[s] + 1
lowerCAmelCase : Optional[Any] = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCAmelCase, key(_UpperCAmelCase, 0, _UpperCAmelCase, _UpperCAmelCase ) )
if neighbours not in close_list_inad:
for var in range(1, _UpperCAmelCase ):
if key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) <= Wa * key(
_UpperCAmelCase, 0, _UpperCAmelCase, _UpperCAmelCase ):
open_list[j].put(
_UpperCAmelCase, key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Tuple = []
for x in range(1, 5 ):
for y in range(1, 6 ):
some_list.append((x, y) )
for x in range(15, 20 ):
some_list.append((x, 17) )
for x in range(10, 19 ):
for y in range(1, 15 ):
some_list.append((x, y) )
# L block
for x in range(1, 4 ):
for y in range(12, 19 ):
some_list.append((x, y) )
for x in range(3, 13 ):
for y in range(16, 19 ):
some_list.append((x, y) )
return some_list
__A : Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__A : str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__A : Any = make_common_ground()
__A : List[Any] = blocks_blk
# hyper parameters
__A : Union[str, Any] = 1
__A : Dict = 1
__A : str = 20
__A : int = 3 # one consistent and two other inconsistent
# start and end destination
__A : str = (0, 0)
__A : List[str] = (n - 1, n - 1)
__A : int = 1
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : Optional[int] = {start: 0, goal: float('inf' )}
lowerCAmelCase : List[str] = {start: -1, goal: -1}
lowerCAmelCase : Dict = []
lowerCAmelCase : Any = set()
for i in range(_UpperCAmelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCAmelCase, key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) )
lowerCAmelCase : list[int] = []
lowerCAmelCase : list[int] = []
while open_list[0].minkey() < float('inf' ):
for i in range(1, _UpperCAmelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
else:
lowerCAmelCase , lowerCAmelCase : int = open_list[i].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, )
close_list_inad.append(_UpperCAmelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
else:
lowerCAmelCase : Tuple = open_list[0].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase, 0, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, )
close_list_anchor.append(_UpperCAmelCase )
print('No path found to goal' )
print()
for i in range(n - 1, -1, -1 ):
for j in range(_UpperCAmelCase ):
if (j, i) in blocks:
print('#', end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*', end=' ' )
else:
print('-', end=' ' )
else:
print('*', end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position', end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 323
|
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCAmelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_UpperCAmelCase, 2 ) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCAmelCase : Optional[Any] = (sidea + sidea + sidea) / 2
lowerCAmelCase : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
print(F'Reqular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Dict = KandinskyVaaPipeline
lowerCAmelCase_ : Any = [
"image_embeds",
"negative_image_embeds",
]
lowerCAmelCase_ : Any = ["image_embeds", "negative_image_embeds"]
lowerCAmelCase_ : Union[str, Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCAmelCase_ : str = False
@property
def lowercase__ ( self : Dict ):
return 32
@property
def lowercase__ ( self : Optional[Any] ):
return 32
@property
def lowercase__ ( self : Dict ):
return self.time_input_dim
@property
def lowercase__ ( self : Any ):
return self.time_input_dim * 4
@property
def lowercase__ ( self : Union[str, Any] ):
return 100
@property
def lowercase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase : Any = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase : Dict = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowercase__ ( self : Tuple ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : List[str] ):
torch.manual_seed(0 )
lowerCAmelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = self.dummy_unet
lowerCAmelCase : Union[str, Any] = self.dummy_movq
lowerCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type='epsilon' , thresholding=UpperCAmelCase_ , )
lowerCAmelCase : Tuple = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase__ ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=0 ):
lowerCAmelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCAmelCase : Optional[Any] = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase : List[str] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Tuple = 'cpu'
lowerCAmelCase : List[Any] = self.get_dummy_components()
lowerCAmelCase : List[Any] = self.pipeline_class(**UpperCAmelCase_ )
lowerCAmelCase : Any = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase : List[str] = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
lowerCAmelCase : Union[str, Any] = output.images
lowerCAmelCase : Dict = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Tuple = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
lowerCAmelCase : str = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
lowerCAmelCase : str = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase : Any = 'red cat, 4k photo'
lowerCAmelCase : int = torch.Generator(device='cuda' ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase : Dict = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase : List[str] = torch.Generator(device='cuda' ).manual_seed(0 )
lowerCAmelCase : int = pipeline(
image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , output_type='np' , )
lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 323
|
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 1
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
lowerCAmelCase : Tuple = 0
while len(_UpperCAmelCase ) > 1:
lowerCAmelCase : Optional[int] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
lowerCAmelCase : Optional[int] = files.index(min(_UpperCAmelCase ) )
temp += files[min_index]
files.pop(_UpperCAmelCase )
files.append(_UpperCAmelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__A : str = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
__A : Dict = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
__A : Dict = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def lowercase__ ( self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=False ):
if rouge_types is None:
lowerCAmelCase : List[Any] = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
lowerCAmelCase : Dict = rouge_scorer.RougeScorer(rouge_types=UpperCAmelCase_ , use_stemmer=UpperCAmelCase_ )
if use_aggregator:
lowerCAmelCase : List[str] = scoring.BootstrapAggregator()
else:
lowerCAmelCase : Union[str, Any] = []
for ref, pred in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : Union[str, Any] = scorer.score(UpperCAmelCase_ , UpperCAmelCase_ )
if use_aggregator:
aggregator.add_scores(UpperCAmelCase_ )
else:
scores.append(UpperCAmelCase_ )
if use_aggregator:
lowerCAmelCase : Dict = aggregator.aggregate()
else:
lowerCAmelCase : List[Any] = {}
for key in scores[0]:
lowerCAmelCase : Optional[Any] = [score[key] for score in scores]
return result
| 323
|
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 100 ) -> int:
'''simple docstring'''
lowerCAmelCase : Any = sum(i * i for i in range(1, n + 1 ) )
lowerCAmelCase : str = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
| 323
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
__A : Optional[int] = TypeVar('''_T''')
class __A ( Generic[_T] ):
def __init__( self : Any , UpperCAmelCase_ : Iterable[_T] | None = None ):
lowerCAmelCase : list[_T] = list(iterable or [] )
lowerCAmelCase : list[_T] = []
def __len__( self : Dict ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : int ):
return f"Queue({tuple(self._stacka[::-1] + self._stacka )})"
def lowercase__ ( self : Dict , UpperCAmelCase_ : _T ):
self._stacka.append(UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : str = self._stacka.pop
lowerCAmelCase : Tuple = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 323
|
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 0.0
for coeff in reversed(_UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = result * x + coeff
return result
if __name__ == "__main__":
__A : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
__A : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 323
| 1
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : Dict = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
lowerCAmelCase : List[str] = hex_num[0] == '-'
if is_negative:
lowerCAmelCase : List[Any] = hex_num[1:]
try:
lowerCAmelCase : List[str] = int(_UpperCAmelCase, 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
lowerCAmelCase : str = ''
while int_num > 0:
lowerCAmelCase : Tuple = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=18 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , ):
lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase : str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : Any = max_resolution
lowerCAmelCase : int = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : str = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
def lowercase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323
| 1
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__A : Dict = False
__A : Optional[Any] = logging.get_logger(__name__)
__A : Tuple = '''ybelkada/fonts'''
def SCREAMING_SNAKE_CASE__ ( ) -> str:
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'Pix2StructImageProcessor. Please upgrade torch.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
_check_torch_version()
lowerCAmelCase : str = image_tensor.unsqueeze(0 )
lowerCAmelCase : Union[str, Any] = torch.nn.functional.unfold(_UpperCAmelCase, (patch_height, patch_width), stride=(patch_height, patch_width) )
lowerCAmelCase : Union[str, Any] = patches.reshape(image_tensor.size(0 ), image_tensor.size(1 ), _UpperCAmelCase, _UpperCAmelCase, -1 )
lowerCAmelCase : Tuple = patches.permute(0, 4, 2, 3, 1 ).reshape(
image_tensor.size(2 ) // patch_height, image_tensor.size(3 ) // patch_width, image_tensor.size(1 ) * patch_height * patch_width, )
return patches.unsqueeze(0 )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase = 36, _UpperCAmelCase = "black", _UpperCAmelCase = "white", _UpperCAmelCase = 5, _UpperCAmelCase = 5, _UpperCAmelCase = 5, _UpperCAmelCase = 5, _UpperCAmelCase = None, _UpperCAmelCase = None, ) -> Image.Image:
'''simple docstring'''
requires_backends(_UpperCAmelCase, 'vision' )
# Add new lines so that each line is no more than 80 characters.
lowerCAmelCase : int = textwrap.TextWrapper(width=80 )
lowerCAmelCase : Any = wrapper.wrap(text=_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = '\n'.join(_UpperCAmelCase )
if font_bytes is not None and font_path is None:
lowerCAmelCase : int = io.BytesIO(_UpperCAmelCase )
elif font_path is not None:
lowerCAmelCase : List[Any] = font_path
else:
lowerCAmelCase : str = hf_hub_download(_UpperCAmelCase, 'Arial.TTF' )
lowerCAmelCase : List[Any] = ImageFont.truetype(_UpperCAmelCase, encoding='UTF-8', size=_UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowerCAmelCase : str = ImageDraw.Draw(Image.new('RGB', (1, 1), _UpperCAmelCase ) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = temp_draw.textbbox((0, 0), _UpperCAmelCase, _UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
lowerCAmelCase : List[str] = text_width + left_padding + right_padding
lowerCAmelCase : Dict = text_height + top_padding + bottom_padding
lowerCAmelCase : Dict = Image.new('RGB', (image_width, image_height), _UpperCAmelCase )
lowerCAmelCase : Optional[int] = ImageDraw.Draw(_UpperCAmelCase )
draw.text(xy=(left_padding, top_padding), text=_UpperCAmelCase, fill=_UpperCAmelCase, font=_UpperCAmelCase )
return image
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, 'vision' )
# Convert to PIL image if necessary
lowerCAmelCase : List[Any] = to_pil_image(_UpperCAmelCase )
lowerCAmelCase : int = render_text(_UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : List[str] = max(header_image.width, image.width )
lowerCAmelCase : str = int(image.height * (new_width / image.width) )
lowerCAmelCase : Tuple = int(header_image.height * (new_width / header_image.width) )
lowerCAmelCase : str = Image.new('RGB', (new_width, new_height + new_header_height), 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ), (0, 0) )
new_image.paste(image.resize((new_width, new_height) ), (0, new_header_height) )
# Convert back to the original framework if necessary
lowerCAmelCase : List[str] = to_numpy_array(_UpperCAmelCase )
if infer_channel_dimension_format(_UpperCAmelCase ) == ChannelDimension.LAST:
lowerCAmelCase : Tuple = to_channel_dimension_format(_UpperCAmelCase, ChannelDimension.LAST )
return new_image
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : str = ["flattened_patches"]
def __init__( self : Optional[int] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : int = 2048 , UpperCAmelCase_ : bool = False , **UpperCAmelCase_ : Optional[int] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : List[Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
lowerCAmelCase : Dict = do_normalize
lowerCAmelCase : Optional[Any] = do_convert_rgb
lowerCAmelCase : List[str] = max_patches
lowerCAmelCase : Any = is_vqa
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : dict , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
lowerCAmelCase : Dict = to_channel_dimension_format(UpperCAmelCase_ , ChannelDimension.FIRST )
lowerCAmelCase : Optional[Any] = torch.from_numpy(UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = patch_size['height'], patch_size['width']
lowerCAmelCase , lowerCAmelCase : str = get_image_size(UpperCAmelCase_ )
# maximize scale s.t.
lowerCAmelCase : str = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowerCAmelCase : List[str] = max(min(math.floor(scale * image_height / patch_height ) , UpperCAmelCase_ ) , 1 )
lowerCAmelCase : int = max(min(math.floor(scale * image_width / patch_width ) , UpperCAmelCase_ ) , 1 )
lowerCAmelCase : Dict = max(num_feasible_rows * patch_height , 1 )
lowerCAmelCase : str = max(num_feasible_cols * patch_width , 1 )
lowerCAmelCase : Tuple = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=UpperCAmelCase_ , antialias=UpperCAmelCase_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowerCAmelCase : Any = torch_extract_patches(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = patches.shape
lowerCAmelCase : Union[str, Any] = patches_shape[1]
lowerCAmelCase : List[str] = patches_shape[2]
lowerCAmelCase : Optional[Any] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowerCAmelCase : Tuple = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowerCAmelCase : Dict = torch.arange(UpperCAmelCase_ ).reshape([rows, 1] ).repeat(1 , UpperCAmelCase_ ).reshape([rows * columns, 1] )
lowerCAmelCase : Optional[Any] = torch.arange(UpperCAmelCase_ ).reshape([1, columns] ).repeat(UpperCAmelCase_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowerCAmelCase : List[Any] = row_ids.to(torch.floataa )
lowerCAmelCase : Optional[int] = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase : Optional[Any] = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase : str = torch.nn.functional.pad(UpperCAmelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowerCAmelCase : Dict = to_numpy_array(UpperCAmelCase_ )
return result
def lowercase__ ( self : List[str] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] ):
if image.dtype == np.uinta:
lowerCAmelCase : List[str] = image.astype(np.floataa )
# take mean across the whole `image`
lowerCAmelCase : Union[str, Any] = np.mean(UpperCAmelCase_ )
lowerCAmelCase : Dict = np.std(UpperCAmelCase_ )
lowerCAmelCase : List[str] = max(UpperCAmelCase_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , **UpperCAmelCase_ )
def lowercase__ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : int , ):
lowerCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase : Any = patch_size if patch_size is not None else self.patch_size
lowerCAmelCase : Union[str, Any] = max_patches if max_patches is not None else self.max_patches
lowerCAmelCase : Union[str, Any] = self.is_vqa
if kwargs.get('data_format' , UpperCAmelCase_ ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
lowerCAmelCase : Optional[Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase : Optional[int] = [convert_to_rgb(UpperCAmelCase_ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase : str = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
lowerCAmelCase : Tuple = kwargs.pop('font_bytes' , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = kwargs.pop('font_path' , UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : Dict = [header_text] * len(UpperCAmelCase_ )
lowerCAmelCase : List[str] = [
render_header(UpperCAmelCase_ , header_text[i] , font_bytes=UpperCAmelCase_ , font_path=UpperCAmelCase_ )
for i, image in enumerate(UpperCAmelCase_ )
]
if do_normalize:
lowerCAmelCase : Any = [self.normalize(image=UpperCAmelCase_ ) for image in images]
# convert to torch tensor and permute
lowerCAmelCase : List[Any] = [
self.extract_flattened_patches(image=UpperCAmelCase_ , max_patches=UpperCAmelCase_ , patch_size=UpperCAmelCase_ )
for image in images
]
# create attention mask in numpy
lowerCAmelCase : Dict = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowerCAmelCase : Dict = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=UpperCAmelCase_ )
return encoded_outputs
| 323
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[Any] = ''
else:
lowerCAmelCase : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : List[str] = dct.pop(_UpperCAmelCase )
lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : str = ViTMSNConfig()
lowerCAmelCase : str = 1_000
lowerCAmelCase : List[str] = 'datasets/huggingface/label-files'
lowerCAmelCase : int = 'imagenet-1k-id2label.json'
lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase ), 'r' ) )
lowerCAmelCase : Optional[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[Any] = 384
lowerCAmelCase : List[Any] = 1_536
lowerCAmelCase : Union[str, Any] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase : List[Any] = 1_024
lowerCAmelCase : Any = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : Any = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase : Any = 4
elif "l7" in checkpoint_url:
lowerCAmelCase : int = 7
lowerCAmelCase : str = 1_024
lowerCAmelCase : Tuple = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Tuple = 16
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : List[str] = ViTMSNModel(_UpperCAmelCase )
lowerCAmelCase : int = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu' )['target_encoder']
lowerCAmelCase : int = ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCAmelCase )
lowerCAmelCase : Tuple = create_rename_keys(_UpperCAmelCase, base_model=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase, base_model=_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase : Dict = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
lowerCAmelCase : Any = ViTImageProcessor(
size=config.image_size, image_mean=_UpperCAmelCase, image_std=_UpperCAmelCase )
lowerCAmelCase : List[Any] = image_processor(images=_UpperCAmelCase, return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
| 1
|
import argparse
import copy
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Tuple = {}
with open(_UpperCAmelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowerCAmelCase : Tuple = []
_list.append([line.split()[1], line.split()[2]] )
lowerCAmelCase : Dict = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowerCAmelCase : Optional[int] = []
_list.append([line.split()[0], line.split()[2]] )
lowerCAmelCase : Tuple = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
with open(_UpperCAmelCase ) as f:
lowerCAmelCase : Tuple = f.read(1 )
lowerCAmelCase : int = start_node
lowerCAmelCase : str = []
lowerCAmelCase : Optional[Any] = start_node
lowerCAmelCase : str = 0
while visiting not in first_solution:
lowerCAmelCase : Any = 10_000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(_UpperCAmelCase ) and k[0] not in first_solution:
lowerCAmelCase : Union[str, Any] = k[1]
lowerCAmelCase : Dict = k[0]
first_solution.append(_UpperCAmelCase )
lowerCAmelCase : Tuple = distance_of_first_solution + int(_UpperCAmelCase )
lowerCAmelCase : List[Any] = best_node
first_solution.append(_UpperCAmelCase )
lowerCAmelCase : List[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowerCAmelCase : List[Any] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10_000
)
return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = []
for n in solution[1:-1]:
lowerCAmelCase : List[Any] = solution.index(_UpperCAmelCase )
for kn in solution[1:-1]:
lowerCAmelCase : Any = solution.index(_UpperCAmelCase )
if n == kn:
continue
lowerCAmelCase : Optional[int] = copy.deepcopy(_UpperCAmelCase )
lowerCAmelCase : List[Any] = kn
lowerCAmelCase : Any = n
lowerCAmelCase : Optional[int] = 0
for k in _tmp[:-1]:
lowerCAmelCase : Optional[int] = _tmp[_tmp.index(_UpperCAmelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowerCAmelCase : int = distance + int(i[1] )
_tmp.append(_UpperCAmelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowerCAmelCase : Tuple = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda _UpperCAmelCase : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : Union[str, Any] = first_solution
lowerCAmelCase : Any = []
lowerCAmelCase : Tuple = distance_of_first_solution
lowerCAmelCase : Any = solution
while count <= iters:
lowerCAmelCase : Tuple = find_neighborhood(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : str = 0
lowerCAmelCase : Tuple = neighborhood[index_of_best_solution]
lowerCAmelCase : Dict = len(_UpperCAmelCase ) - 1
lowerCAmelCase : Tuple = False
while not found:
lowerCAmelCase : Dict = 0
while i < len(_UpperCAmelCase ):
if best_solution[i] != solution[i]:
lowerCAmelCase : Optional[int] = best_solution[i]
lowerCAmelCase : int = solution[i]
break
lowerCAmelCase : Dict = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowerCAmelCase : Tuple = True
lowerCAmelCase : List[str] = best_solution[:-1]
lowerCAmelCase : Union[str, Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowerCAmelCase : int = cost
lowerCAmelCase : Optional[Any] = solution
else:
lowerCAmelCase : int = index_of_best_solution + 1
lowerCAmelCase : Optional[Any] = neighborhood[index_of_best_solution]
if len(_UpperCAmelCase ) >= size:
tabu_list.pop(0 )
lowerCAmelCase : List[str] = count + 1
return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase=None ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[int] = generate_neighbours(args.File )
lowerCAmelCase , lowerCAmelCase : List[str] = generate_first_solution(
args.File, _UpperCAmelCase )
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = tabu_search(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, args.Iterations, args.Size, )
print(f"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 323
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError('String lengths must match!' )
lowerCAmelCase : Tuple = 0
for chara, chara in zip(_UpperCAmelCase, _UpperCAmelCase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 1
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[str] = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
__A : List[Any] = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowerCAmelCase : str = int(re.match(r'.*layer_(\d*).*', _UpperCAmelCase )[1] )
layer_number -= 3
return f"h.{layer_number}." + key
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
lowerCAmelCase : Tuple = re.search(r'[^\d](\d+)$', str(_UpperCAmelCase ) )
if bit_search is None:
raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
lowerCAmelCase : Optional[Any] = int(bit_search.groups()[0] )
return bit_size // 8
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if bloom_config_file == "":
lowerCAmelCase : List[Any] = BloomConfig()
else:
lowerCAmelCase : Optional[Any] = BloomConfig.from_json_file(_UpperCAmelCase )
if shard_model:
lowerCAmelCase : Union[str, Any] = os.listdir(_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = sorted(filter(lambda _UpperCAmelCase : s.startswith('layer' ) and "model_00" in s, _UpperCAmelCase ) )
lowerCAmelCase : List[Any] = {'weight_map': {}, 'metadata': {}}
lowerCAmelCase : int = 0
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Optional[Any] = BloomConfig()
for j, file in enumerate(_UpperCAmelCase ):
print('Processing file: {}'.format(_UpperCAmelCase ) )
lowerCAmelCase : Optional[int] = None
for i in range(_UpperCAmelCase ):
# load all TP files
lowerCAmelCase : Optional[int] = file.replace('model_00', f"model_0{i}" )
lowerCAmelCase : List[Any] = torch.load(os.path.join(_UpperCAmelCase, _UpperCAmelCase ), map_location='cpu' )
# Rename keys in the transformers names
lowerCAmelCase : Union[str, Any] = list(temp.keys() )
for key in keys:
lowerCAmelCase : List[Any] = temp.pop(_UpperCAmelCase )
if tensors is None:
lowerCAmelCase : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase : Optional[Any] = torch.cat([tensors[key], temp[key]], dim=_UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase : int = tensors[key] / pretraining_tp
torch.save(
_UpperCAmelCase, os.path.join(
_UpperCAmelCase, 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ), str(len(_UpperCAmelCase ) ).zfill(5 ) ), ), )
for key in tensors.keys():
lowerCAmelCase : str = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase : Union[str, Any] = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ), str(len(_UpperCAmelCase ) ).zfill(5 ) )
lowerCAmelCase : Optional[int] = BloomConfig()
lowerCAmelCase : Dict = pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCAmelCase : Union[str, Any] = total_size
with open(_UpperCAmelCase, 'w', encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_UpperCAmelCase, WEIGHTS_NAME + '.index.json' ), 'w', encoding='utf-8' ) as f:
lowerCAmelCase : Any = json.dumps(_UpperCAmelCase, indent=2, sort_keys=_UpperCAmelCase ) + '\n'
f.write(_UpperCAmelCase )
else:
lowerCAmelCase : Dict = BloomModel(_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = os.listdir(_UpperCAmelCase )
lowerCAmelCase : Any = sorted(filter(lambda _UpperCAmelCase : s.startswith('layer' ) and "model_00" in s, _UpperCAmelCase ) )
lowerCAmelCase : Any = None
for i, file in enumerate(_UpperCAmelCase ):
lowerCAmelCase : str = None
for i in range(_UpperCAmelCase ):
# load all TP files
lowerCAmelCase : List[str] = file.replace('model_00', f"model_0{i}" )
lowerCAmelCase : List[str] = torch.load(os.path.join(_UpperCAmelCase, _UpperCAmelCase ), map_location='cpu' )
# Rename keys in the transformers names
lowerCAmelCase : str = list(temp.keys() )
for key in keys:
lowerCAmelCase : Tuple = temp.pop(_UpperCAmelCase )
if tensors is None:
lowerCAmelCase : Tuple = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase : List[str] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase : Optional[Any] = torch.cat([tensors[key], temp[key]], dim=_UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase : Union[str, Any] = tensors[key] / pretraining_tp
lowerCAmelCase : List[str] = model.load_state_dict(_UpperCAmelCase, strict=_UpperCAmelCase )
assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
lowerCAmelCase : List[Any] = set(other_keys.missing_keys )
else:
lowerCAmelCase : Optional[Any] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(_UpperCAmelCase, exist_ok=_UpperCAmelCase )
lowerCAmelCase : Tuple = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCAmelCase : Tuple = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
lowerCAmelCase : str = model.to(config.torch_dtype )
torch.save(model.state_dict(), _UpperCAmelCase )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_UpperCAmelCase, 'w', encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
__A : Optional[int] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 323
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A : List[Any] = trt.Logger(trt.Logger.WARNING)
__A : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A : List[Any] = logging.getLogger(__name__)
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__A : List[str] = parser.parse_args()
if args.tokenizer_name:
__A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__A : List[Any] = args.per_device_eval_batch_size
__A : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A : Any = True
__A : Union[str, Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__A : List[str] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__A : Dict = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A : str = [network.get_input(i) for i in range(network.num_inputs)]
__A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__A : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.intaa )
lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.intaa )
lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase )
# start time
lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase : List[str] = time.time()
lowerCAmelCase : Tuple = end_time - start_time
lowerCAmelCase : Union[str, Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__A : int = raw_datasets['''validation'''].column_names
__A : int = '''question''' if '''question''' in column_names else column_names[0]
__A : List[str] = '''context''' if '''context''' in column_names else column_names[1]
__A : int = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A : str = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
__A : int = raw_datasets['''validation''']
# Validation Feature Creation
__A : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__A : List[str] = default_data_collator
__A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__A : Union[str, Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
__A , __A : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__A , __A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 323
| 1
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return x + 2
class __A ( unittest.TestCase ):
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = 'x = 3'
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
lowerCAmelCase : Dict = 'x = y'
lowerCAmelCase : List[Any] = {'y': 5}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 'y = add_two(x)'
lowerCAmelCase : int = {'x': 3}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 'x = 3'
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5'
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\''
lowerCAmelCase : str = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} )
lowerCAmelCase : Any = {'x': 8}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = 'test_list = [x, add_two(x)]'
lowerCAmelCase : Optional[Any] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [3, 5] )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : int = 'y = x'
lowerCAmelCase : Optional[int] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowerCAmelCase : List[str] = {'x': 3}
lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowerCAmelCase : List[Any] = {'x': 3}
lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i'
lowerCAmelCase : str = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ )
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
| 323
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = "dinat"
lowerCAmelCase_ : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : Dict=[2, 4, 8, 16] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase_ : int=3.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : str = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = len(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = num_heads
lowerCAmelCase : Tuple = kernel_size
lowerCAmelCase : List[str] = dilations
lowerCAmelCase : Any = mlp_ratio
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase : int = layer_scale_init_value
lowerCAmelCase : Optional[Any] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 323
| 1
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__A : Any = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__A : List[str] = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = SavedModel()
lowerCAmelCase : List[Any] = []
with open(os.path.join(_UpperCAmelCase, 'utils', 'tf_ops', 'onnx.json' ) ) as f:
lowerCAmelCase : Union[str, Any] = json.load(_UpperCAmelCase )['opsets']
for i in range(1, opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_UpperCAmelCase )] )
with open(_UpperCAmelCase, 'rb' ) as f:
saved_model.ParseFromString(f.read() )
lowerCAmelCase : Any = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
lowerCAmelCase : str = sorted(_UpperCAmelCase )
lowerCAmelCase : str = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_UpperCAmelCase )
if strict and len(_UpperCAmelCase ) > 0:
raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops )
elif len(_UpperCAmelCase ) > 0:
print(f"Found the following incompatible ops for the opset {opset}:" )
print(*_UpperCAmelCase, sep='\n' )
else:
print(f"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__A : List[str] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 323
|
from manim import *
class __A ( lowerCAmelCase ):
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
| 323
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = "git_vision_model"
def __init__( self : int , UpperCAmelCase_ : Tuple=768 , UpperCAmelCase_ : Optional[int]=3072 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Any=224 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : int="quick_gelu" , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : List[str]=0.02 , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : str = hidden_size
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Optional[int] = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : str = num_channels
lowerCAmelCase : Dict = patch_size
lowerCAmelCase : List[Any] = image_size
lowerCAmelCase : Optional[int] = initializer_range
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : int = layer_norm_eps
lowerCAmelCase : Any = hidden_act
@classmethod
def lowercase__ ( cls : int , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Optional[Any] ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase : Any = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
lowerCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : List[str] = "git"
def __init__( self : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=30522 , UpperCAmelCase_ : Optional[Any]=768 , UpperCAmelCase_ : int=6 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : List[Any]=3072 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Dict=1024 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[int]=1E-12 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : int="absolute" , UpperCAmelCase_ : str=True , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Union[str, Any]=101 , UpperCAmelCase_ : str=102 , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Any , ):
super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
if vision_config is None:
lowerCAmelCase : List[Any] = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
lowerCAmelCase : int = GitVisionConfig(**UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : Optional[int] = num_attention_heads
lowerCAmelCase : Tuple = hidden_act
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : Any = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : int = max_position_embeddings
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Union[str, Any] = position_embedding_type
lowerCAmelCase : List[str] = use_cache
lowerCAmelCase : Optional[Any] = tie_word_embeddings
lowerCAmelCase : Any = num_image_with_embedding
lowerCAmelCase : Dict = bos_token_id
lowerCAmelCase : str = eos_token_id
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = copy.deepcopy(self.__dict__ )
lowerCAmelCase : List[Any] = self.vision_config.to_dict()
lowerCAmelCase : List[str] = self.__class__.model_type
return output
| 323
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 1_000 ) -> int:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 2**power
lowerCAmelCase : List[Any] = 0
while n:
lowerCAmelCase , lowerCAmelCase : List[str] = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 323
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A : Dict = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A : List[str] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A : Any = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__A : Tuple = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__A : Any = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__A : List[str] = tf.keras.preprocessing.image.img_to_array(test_image)
__A : Optional[Any] = np.expand_dims(test_image, axis=0)
__A : int = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__A : Optional[int] = '''Normal'''
if result[0][0] == 1:
__A : str = '''Abnormality detected'''
| 323
| 1
|
import re
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> list:
'''simple docstring'''
return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]', str_ )]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : List[Any] = split_input(str_ )
return "".join(
[''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> str:
'''simple docstring'''
try:
lowerCAmelCase : Any = split_input(_UpperCAmelCase )
if upper:
lowerCAmelCase : Optional[int] = ''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
lowerCAmelCase : Dict = ''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
return to_simple_case(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
try:
lowerCAmelCase : Optional[int] = to_simple_case(_UpperCAmelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> str:
'''simple docstring'''
return to_complex_case(_UpperCAmelCase, _UpperCAmelCase, '_' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> str:
'''simple docstring'''
return to_complex_case(_UpperCAmelCase, _UpperCAmelCase, '-' )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 323
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : str = logging.getLogger(__name__)
class __A ( lowerCAmelCase ):
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None ):
lowerCAmelCase : List[Any] = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] )
lowerCAmelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = BertEncoderWithPabee(UpperCAmelCase_ )
self.init_weights()
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
lowerCAmelCase : int = threshold
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = patience
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(UpperCAmelCase_ )
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
if token_type_ids is None:
lowerCAmelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : Optional[int] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.invert_attention_mask(UpperCAmelCase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : int = self.embeddings(
input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ )
lowerCAmelCase : List[str] = embedding_output
if self.training:
lowerCAmelCase : Tuple = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Dict = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(UpperCAmelCase_ ) )
res.append(UpperCAmelCase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Union[str, Any] = self.encoder(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )]
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = output_layers[i](UpperCAmelCase_ )
if regression:
lowerCAmelCase : List[str] = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Any = 0
else:
lowerCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[Any] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Dict = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : int = BertModelWithPabee(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None , ):
lowerCAmelCase : int = self.bert(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCAmelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
| 323
| 1
|
import copy
import re
class __A :
lowerCAmelCase_ : Any = "hp"
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Dict = None
@classmethod
def lowercase__ ( cls : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int ):
lowerCAmelCase : Dict = prefix
lowerCAmelCase : Optional[int] = defaults
cls.build_naming_info()
@staticmethod
def lowercase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
if len(UpperCAmelCase_ ) == 0:
return ""
lowerCAmelCase : Tuple = None
if any(char.isdigit() for char in word ):
raise Exception(f"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(UpperCAmelCase_ ) + 1 ):
lowerCAmelCase : Any = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowerCAmelCase : Optional[Any] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(UpperCAmelCase_ : Optional[int] ):
lowerCAmelCase : Dict = ''
while integer != 0:
lowerCAmelCase : int = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
lowerCAmelCase : str = 0
while True:
lowerCAmelCase : List[Any] = word + '#' + int_to_alphabetic(UpperCAmelCase_ )
if sword in info["reverse_short_word"]:
continue
else:
lowerCAmelCase : int = sword
break
lowerCAmelCase : Dict = short_word
lowerCAmelCase : str = word
return short_word
@staticmethod
def lowercase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : str ):
lowerCAmelCase : Dict = param_name.split('_' )
lowerCAmelCase : List[Any] = [TrialShortNamer.shortname_for_word(UpperCAmelCase_ , UpperCAmelCase_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
lowerCAmelCase : Union[str, Any] = ['', '_']
for separator in separators:
lowerCAmelCase : Tuple = separator.join(UpperCAmelCase_ )
if shortname not in info["reverse_short_param"]:
lowerCAmelCase : Tuple = shortname
lowerCAmelCase : Union[str, Any] = param_name
return shortname
return param_name
@staticmethod
def lowercase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[str] = TrialShortNamer.shortname_for_key(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = short_name
lowerCAmelCase : Any = param_name
@classmethod
def lowercase__ ( cls : int ):
if cls.NAMING_INFO is not None:
return
lowerCAmelCase : Optional[Any] = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
lowerCAmelCase : Any = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = info
@classmethod
def lowercase__ ( cls : int , UpperCAmelCase_ : List[Any] ):
cls.build_naming_info()
assert cls.PREFIX is not None
lowerCAmelCase : Optional[Any] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowerCAmelCase : Any = cls.NAMING_INFO['short_param'][k]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : Union[str, Any] = 1 if v else 0
lowerCAmelCase : int = '' if isinstance(UpperCAmelCase_ , (int, float) ) else '-'
lowerCAmelCase : Union[str, Any] = f"{key}{sep}{v}"
name.append(UpperCAmelCase_ )
return "_".join(UpperCAmelCase_ )
@classmethod
def lowercase__ ( cls : Optional[Any] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : List[Any] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowerCAmelCase : Any = []
else:
lowerCAmelCase : Optional[int] = repr.split('_' )
lowerCAmelCase : Dict = {}
for value in values:
if "-" in value:
lowerCAmelCase , lowerCAmelCase : List[Any] = value.split('-' )
else:
lowerCAmelCase : str = re.sub('[0-9.]' , '' , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = float(re.sub('[^0-9.]' , '' , UpperCAmelCase_ ) )
lowerCAmelCase : List[str] = cls.NAMING_INFO['reverse_short_param'][p_k]
lowerCAmelCase : str = p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowerCAmelCase : Optional[Any] = cls.DEFAULTS[k]
return parameters
| 323
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "deberta-v2"
def __init__( self : int , UpperCAmelCase_ : Dict=128100 , UpperCAmelCase_ : Optional[int]=1536 , UpperCAmelCase_ : Tuple=24 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Any=6144 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-7 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : int="gelu" , **UpperCAmelCase_ : int , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Union[str, Any] = relative_attention
lowerCAmelCase : List[Any] = max_relative_positions
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase_ ) == str:
lowerCAmelCase : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCAmelCase : str = pos_att_type
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : str = kwargs.get('pooler_hidden_size' , UpperCAmelCase_ )
lowerCAmelCase : Tuple = pooler_dropout
lowerCAmelCase : Union[str, Any] = pooler_hidden_act
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase__ ( self : int ):
return 12
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : "PreTrainedTokenizerBase" = None , ):
lowerCAmelCase : List[str] = super().generate_dummy_inputs(preprocessor=UpperCAmelCase_ , framework=UpperCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 323
| 1
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__A : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__A : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__A : int = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> tuple[str, float]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> tuple[str, str]:
'''simple docstring'''
lowerCAmelCase : List[Any] = random.randint(0, len(_UpperCAmelCase ) - 1 )
lowerCAmelCase : str = parent_a[:random_slice] + parent_a[random_slice:]
lowerCAmelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = list(_UpperCAmelCase )
if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
lowerCAmelCase : List[Any] = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ) -> list[str]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
# Generate more children proportionally to the fitness score.
lowerCAmelCase : Optional[Any] = int(parent_a[1] * 100 ) + 1
lowerCAmelCase : Optional[int] = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
lowerCAmelCase : Dict = population_score[random.randint(0, _UpperCAmelCase )][0]
lowerCAmelCase , lowerCAmelCase : str = crossover(parent_a[0], _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase, _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase, _UpperCAmelCase ) )
return pop
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
lowerCAmelCase : int = f"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
lowerCAmelCase : List[Any] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowerCAmelCase : Optional[int] = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
lowerCAmelCase : Any = []
for _ in range(_UpperCAmelCase ):
population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
lowerCAmelCase , lowerCAmelCase : Optional[Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowerCAmelCase : str = [evaluate(_UpperCAmelCase, _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
lowerCAmelCase : Union[str, Any] = sorted(_UpperCAmelCase, key=lambda _UpperCAmelCase : x[1], reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"\nGeneration: {generation}"
f"\nTotal Population:{total_population}"
f"\nBest score: {population_score[0][1]}"
f"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
lowerCAmelCase : Dict = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
lowerCAmelCase : List[Any] = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )], _UpperCAmelCase, _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__A : int = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__A : Any = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__A , __A , __A : Union[str, Any] = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 323
|
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 323
| 1
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__A : Any = ['''text''', '''image''', '''audio''']
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
inputs.append(create_inputs(_UpperCAmelCase ) )
else:
raise ValueError(f"Invalid type requested: {input_type}" )
return inputs
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : int = []
for output in outputs:
if isinstance(_UpperCAmelCase, (str, AgentText) ):
output_types.append('text' )
elif isinstance(_UpperCAmelCase, (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(_UpperCAmelCase, (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f"Invalid output: {output}" )
return output_types
@is_tool_test
class __A :
def lowercase__ ( self : Tuple ):
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
lowerCAmelCase : str = self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCAmelCase_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = create_inputs(self.tool.inputs )
lowerCAmelCase : Optional[int] = self.tool(*UpperCAmelCase_ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase : Tuple = [outputs]
self.assertListEqual(output_types(UpperCAmelCase_ ) , self.tool.outputs )
def lowercase__ ( self : int ):
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = create_inputs(self.tool.inputs )
lowerCAmelCase : List[str] = self.tool(*UpperCAmelCase_ )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : List[Any] = [outputs]
self.assertEqual(len(UpperCAmelCase_ ) , len(self.tool.outputs ) )
for output, output_type in zip(UpperCAmelCase_ , self.tool.outputs ):
lowerCAmelCase : Dict = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = create_inputs(self.tool.inputs )
lowerCAmelCase : Union[str, Any] = []
for _input, input_type in zip(UpperCAmelCase_ , self.tool.inputs ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase : List[str] = self.tool(*UpperCAmelCase_ )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : int = [outputs]
self.assertEqual(len(UpperCAmelCase_ ) , len(self.tool.outputs ) )
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A : Any = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __A :
lowerCAmelCase_ : List[Any] = PegasusConfig
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : int = "gelu"
def __init__( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Tuple=99 , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=1 , UpperCAmelCase_ : Tuple=0 , ):
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Tuple = is_training
lowerCAmelCase : Dict = use_labels
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = max_position_embeddings
lowerCAmelCase : Optional[Any] = eos_token_id
lowerCAmelCase : List[str] = pad_token_id
lowerCAmelCase : Optional[int] = bos_token_id
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : int = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase : Tuple = prepare_pegasus_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Union[str, Any] = 20
lowerCAmelCase : Tuple = model_class_name(UpperCAmelCase_ )
lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] )
lowerCAmelCase , lowerCAmelCase : Dict = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
lowerCAmelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
lowerCAmelCase : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : int = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def lowercase__ ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Tuple = 20
lowerCAmelCase : Tuple = model_class_name(UpperCAmelCase_ )
lowerCAmelCase : Any = model.encode(inputs_dict['input_ids'] )
lowerCAmelCase , lowerCAmelCase : List[str] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase : Dict = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCAmelCase : int = model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ )
lowerCAmelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None, ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
lowerCAmelCase : Dict = np.not_equal(_UpperCAmelCase, config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowerCAmelCase : str = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowerCAmelCase_ : Dict = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Union[str, Any] = False
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[int] = FlaxPegasusModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Any = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : int = model_class(UpperCAmelCase_ )
@jax.jit
def encode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Any ):
return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
with self.subTest('JIT Enabled' ):
lowerCAmelCase : Optional[Any] = encode_jitted(**UpperCAmelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase : Tuple = encode_jitted(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self : List[str] ):
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Dict = model_class(UpperCAmelCase_ )
lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
lowerCAmelCase : Union[str, Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
return model.decode(
decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )
with self.subTest('JIT Enabled' ):
lowerCAmelCase : Optional[int] = decode_jitted(**UpperCAmelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase : Optional[Any] = decode_jitted(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase__ ( self : Tuple ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Any = model_class_name.from_pretrained('google/pegasus-large' , from_pt=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = np.ones((1, 1) )
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@slow
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : str = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
lowerCAmelCase : int = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
lowerCAmelCase : Optional[int] = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
lowerCAmelCase : List[str] = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
lowerCAmelCase : List[Any] = tokenizer(UpperCAmelCase_ , return_tensors='np' , truncation=UpperCAmelCase_ , max_length=512 , padding=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = model.generate(**UpperCAmelCase_ , num_beams=2 ).sequences
lowerCAmelCase : List[str] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
assert tgt_text == decoded
| 323
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = 13
lowerCAmelCase : Union[str, Any] = 7
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : List[str] = 2
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[Any] = 37
lowerCAmelCase : List[Any] = 'gelu'
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Optional[Any] = 512
lowerCAmelCase : Dict = 16
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Union[str, Any] = 0.02
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Any = None
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[str] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = self.num_labels
lowerCAmelCase : Any = TFRoFormerForTokenClassification(config=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : int = False
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
lowerCAmelCase : List[Any] = TFRoFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0]
# TODO Replace vocab size
lowerCAmelCase : Any = 50000
lowerCAmelCase : str = [1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = tf.constant([[4, 10]] )
lowerCAmelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowerCAmelCase : int = emba(input_ids.shape )
lowerCAmelCase : str = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
lowerCAmelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowerCAmelCase : List[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
# 2,12,16,64
lowerCAmelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : List[str] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowerCAmelCase : List[Any] = embed_positions([2, 16, 768] )[None, None, :, :]
lowerCAmelCase , lowerCAmelCase : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
| 323
| 1
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __A :
def __init__( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=14 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int=None , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : List[Any] = use_token_type_ids
lowerCAmelCase : Any = use_input_mask
lowerCAmelCase : List[Any] = use_labels
lowerCAmelCase : List[Any] = use_mc_token_ids
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = max_position_embeddings
lowerCAmelCase : List[str] = type_vocab_size
lowerCAmelCase : Optional[int] = type_sequence_label_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Union[str, Any] = num_labels
lowerCAmelCase : Union[str, Any] = num_choices
lowerCAmelCase : List[str] = scope
lowerCAmelCase : List[str] = self.vocab_size - 1
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[Any] = None
if self.use_mc_token_ids:
lowerCAmelCase : str = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = None
if self.use_labels:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : int = self.get_config()
lowerCAmelCase : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : List[str] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowercase__ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , *UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : Optional[Any] = CTRLModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , *UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = CTRLLMHeadModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : Tuple = config_and_inputs
lowerCAmelCase : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : Union[str, Any] = CTRLForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __A ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase_ : List[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCAmelCase_ : str = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : str = False
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = CTRLModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=UpperCAmelCase_ , n_embd=37 )
def lowercase__ ( self : Tuple ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCAmelCase_ )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase__ ( self : Tuple ):
pass
@slow
def lowercase__ ( self : List[str] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = CTRLModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : List[str] ):
pass
@require_torch
class __A ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : str = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(UpperCAmelCase_ )
lowerCAmelCase : List[str] = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=UpperCAmelCase_ ) # Legal the president is
lowerCAmelCase : Dict = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCAmelCase : int = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase_ )
| 323
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 323
| 1
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files', [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
], )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md', 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md', 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json', 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
lowerCAmelCase : Dict = DatasetInfosDict.from_directory(_UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info', [
DatasetInfo(),
DatasetInfo(
description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=42, ),
], )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = str(_UpperCAmelCase )
dataset_info.write_to_directory(_UpperCAmelCase )
lowerCAmelCase : int = DatasetInfo.from_directory(_UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_UpperCAmelCase, 'dataset_info.json' ) )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = DatasetInfo(
description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, )
lowerCAmelCase : Union[str, Any] = dataset_info._to_yaml_dict()
assert sorted(_UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
lowerCAmelCase : Optional[Any] = yaml.safe_dump(_UpperCAmelCase )
lowerCAmelCase : Tuple = yaml.safe_load(_UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = DatasetInfo()
lowerCAmelCase : int = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict', [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=42, )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1_337 ),
} ),
], )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : List[Any] = str(_UpperCAmelCase )
dataset_infos_dict.write_to_directory(_UpperCAmelCase )
lowerCAmelCase : List[str] = DatasetInfosDict.from_directory(_UpperCAmelCase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowerCAmelCase : Tuple = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowerCAmelCase : Optional[int] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_UpperCAmelCase, 'README.md' ) )
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A :
def __init__( self : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=32 , UpperCAmelCase_ : str=5 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Tuple=None , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = image_size
lowerCAmelCase : int = patch_size
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Dict = is_training
lowerCAmelCase : Any = use_labels
lowerCAmelCase : str = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Dict = type_sequence_label_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Optional[int] = mask_ratio
lowerCAmelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCAmelCase : Any = (image_size // patch_size) ** 2
lowerCAmelCase : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : int ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ):
lowerCAmelCase : int = ViTMAEModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Optional[Any] = ViTMAEForPreTraining(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = (self.image_size // self.patch_size) ** 2
lowerCAmelCase : Tuple = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : int = ViTMAEForPreTraining(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = config_and_inputs
lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : str = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase_ : Dict = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : List[Any] = False
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = ViTMAEModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def lowercase__ ( self : Dict ):
pass
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) )
def lowercase__ ( self : Tuple ):
lowerCAmelCase , lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : str = model_class(UpperCAmelCase_ )
lowerCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ):
# make masks reproducible
np.random.seed(2 )
lowerCAmelCase : Dict = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCAmelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase : Any = torch.from_numpy(UpperCAmelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCAmelCase : str = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCAmelCase : Dict = outputs[0].cpu().numpy()
lowerCAmelCase : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model_class.from_pretrained(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCAmelCase : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Make sure we don't have nans
lowerCAmelCase : List[Any] = after_outputs[0].cpu().numpy()
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase_ , 1E-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def lowercase__ ( self : Optional[Any] ):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def lowercase__ ( self : int ):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase__ ( self : Dict ):
pass
@slow
def lowercase__ ( self : List[str] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[Any] = ViTMAEModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[Any] ):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def lowercase__ ( self : Optional[int] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCAmelCase : Optional[int] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(UpperCAmelCase_ )
lowerCAmelCase : Dict = self.default_image_processor
lowerCAmelCase : Union[str, Any] = prepare_img()
lowerCAmelCase : List[Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCAmelCase : int = ViTMAEConfig()
lowerCAmelCase : Dict = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCAmelCase : Union[str, Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCAmelCase : str = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ ) )
# verify the logits
lowerCAmelCase : int = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_ ) , atol=1E-4 ) )
| 323
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase_ : Optional[List[bool]]
lowerCAmelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.