| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
from __future__ import annotations


class IIRFilter:
    """N-order IIR filter driven by the coefficient lists a_coeffs and b_coeffs."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        # Allow a_coeffs to be passed without the leading a_0 = 1.0.
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the histories and record the newest input/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| code_codestyle: 270 |
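A minimal usage sketch for the filter above, assuming the de-obfuscated names IIRFilter, set_coefficients, and process from the rewrite; the coefficient values are illustrative only:

# With a = [1, 0, 0] and b = [1, 0, 0] the filter is an identity pass-through.
filt = IIRFilter(2)
filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
assert filt.process(0.5) == 0.5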
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
| style_context_codestyle: 270 | label: 1 |
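The classic digit-cancelling fraction is 49/98 = 4/8: striking the shared 9 leaves the value unchanged. A quick check against the helper above, assuming the de-obfuscated name is_digit_cancelling:

assert is_digit_cancelling(49, 98)      # 4/8 == 49/98 == 1/2
assert not is_digit_cancelling(64, 16)  # shared digit is not in the cancelling position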
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression using two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| code_codestyle: 60 |
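A hand trace of the two stacks on the sample equation, for clarity (comments only):

# "(5 + ((4 * 2) * (2 + 3)))"
# ')' closing "4 * 2":   pop *, 2, 4  -> push 8
# ')' closing "2 + 3":   pop +, 3, 2  -> push 5
# ')' closing "8 * 5":   pop *, 5, 8  -> push 40
# ')' closing "5 + 40":  pop +, 40, 5 -> push 45   (RULE 5 returns 45)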
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )-> Optional[Any]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=_lowercase , speech_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , feature_extractor=_lowercase , )
def UpperCAmelCase_ ( self , _lowercase = "auto" )-> str:
if slice_size == "auto":
UpperCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def UpperCAmelCase_ ( self )-> Optional[int]:
self.enable_attention_slicing(_lowercase )
@torch.no_grad()
def __call__( self , _lowercase , _lowercase=16_000 , _lowercase = 512 , _lowercase = 512 , _lowercase = 50 , _lowercase = 7.5 , _lowercase = None , _lowercase = 1 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , **_lowercase , )-> str:
UpperCamelCase_ = self.speech_processor.feature_extractor(
_lowercase , return_tensors="pt" , sampling_rate=_lowercase ).input_features.to(self.device )
UpperCamelCase_ = self.speech_model.generate(_lowercase , max_length=480_000 )
UpperCamelCase_ = self.speech_processor.tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , normalize=_lowercase )[
0
]
if isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = 1
elif isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = len(_lowercase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_lowercase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_lowercase )}." )
# get prompt text embeddings
UpperCamelCase_ = self.tokenizer(
_lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_embeddings.shape
UpperCamelCase_ = text_embeddings.repeat(1 , _lowercase , 1 )
UpperCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , _lowercase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase_ = 42
if negative_prompt is None:
UpperCamelCase_ = [""] * batch_size
elif type(_lowercase ) is not type(_lowercase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(_lowercase )} !="
F" {type(_lowercase )}." )
elif isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = [negative_prompt]
elif batch_size != len(_lowercase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(_lowercase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
UpperCamelCase_ = negative_prompt
UpperCamelCase_ = text_input_ids.shape[-1]
UpperCamelCase_ = self.tokenizer(
_lowercase , padding="max_length" , max_length=_lowercase , truncation=_lowercase , return_tensors="pt" , )
UpperCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ = uncond_embeddings.shape[1]
UpperCamelCase_ = uncond_embeddings.repeat(1 , _lowercase , 1 )
UpperCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device="cpu" , dtype=_lowercase ).to(
self.device )
else:
UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCamelCase_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase_ = {}
if accepts_eta:
UpperCamelCase_ = eta
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase_ = self.scheduler.scale_model_input(_lowercase , _lowercase )
# predict the noise residual
UpperCamelCase_ = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase_ , UpperCamelCase_ = noise_pred.chunk(2 )
UpperCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase_ = self.scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
UpperCamelCase_ = 1 / 0.18215 * latents
UpperCamelCase_ = self.vae.decode(_lowercase ).sample
UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase_ = self.numpy_to_pil(_lowercase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_lowercase , nsfw_content_detected=_lowercase )
| style_context_codestyle: 60 | label: 1 |
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between `start` and `end` (inclusive) with slowsort."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| code_codestyle: 337 |
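A small in-place check of the sort above, using the de-obfuscated name slowsort:

data = [5, 2, 4, 1, 3]
slowsort(data)  # sorts the whole list in place
assert data == [1, 2, 3, 4, 5]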
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| style_context_codestyle: 337 | label: 1 |
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME

DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| code_codestyle: 348 |
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| style_context_codestyle: 348 | label: 1 |
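Two sanity checks for the converter above, using the de-obfuscated name energy_conversion:

assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0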
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons have been assigned a task
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| code_codestyle: 27 |
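For the driver values above (three persons with allowed task lists [[1, 3, 4], [1, 2, 5], [3, 4]]) the bitmask DP counts 10 assignments. A brute-force cross-check, independent of the class above:

from itertools import product

task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
# every way to give each person one of their allowed tasks, all tasks distinct
count = sum(len(set(combo)) == len(combo) for combo in product(*task_performed))
assert count == 10  # matches the DP answer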
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
requires_backends(self ,'decord' )
self.check_model_type(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> int:
A__ = {}
if frame_sampling_rate is not None:
A__ = frame_sampling_rate
if num_frames is not None:
A__ = num_frames
A__ = {}
if top_k is not None:
A__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=1 ) -> Union[str, Any]:
if num_frames is None:
A__ = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
A__ = BytesIO(requests.get(__UpperCAmelCase ).content )
A__ = VideoReader(__UpperCAmelCase )
videoreader.seek(0 )
A__ = 0
A__ = num_frames * frame_sampling_rate - 1
A__ = np.linspace(__UpperCAmelCase ,__UpperCAmelCase ,num=__UpperCAmelCase ,dtype=np.int64 )
A__ = videoreader.get_batch(__UpperCAmelCase ).asnumpy()
A__ = list(__UpperCAmelCase )
A__ = self.image_processor(__UpperCAmelCase ,return_tensors=self.framework )
return model_inputs
def snake_case__ ( self ,__UpperCAmelCase ) -> Dict:
A__ = self.model(**__UpperCAmelCase )
return model_outputs
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=5 ) -> Union[str, Any]:
if top_k > self.model.config.num_labels:
A__ = self.model.config.num_labels
if self.framework == "pt":
A__ = model_outputs.logits.softmax(-1 )[0]
A__ , A__ = probs.topk(__UpperCAmelCase )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
A__ = scores.tolist()
A__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCAmelCase ,__UpperCAmelCase )]
| style_context_codestyle: 221 | label: 0 |
import torch


def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| code_codestyle: 358 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation, then SentencePiece."""
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : Optional[Any]="<sep>" , __UpperCAmelCase : int="<pad>" , __UpperCAmelCase : Any="<cls>" , __UpperCAmelCase : List[str]="<mask>" , __UpperCAmelCase : Optional[int]=["<eop>", "<eod>"] , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Union[str, Any] , ) ->None:
"""simple docstring"""
a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
a = 3
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
a = jieba
a = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def __lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : List[str] , __UpperCAmelCase : Optional[int] ) ->str:
"""simple docstring"""
a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] ) ->List[str]:
"""simple docstring"""
if self.remove_space:
a = ''' '''.join(inputs.strip().split() )
else:
a = inputs
a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
a = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
a = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
a = outputs.lower()
return outputs
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = self.preprocess_text(__UpperCAmelCase )
a = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
a = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
a = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a = cur_pieces[1:]
else:
a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Any ) ->Any:
"""simple docstring"""
return self.sp_model.PieceToId(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict ) ->Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = super()._decode(*__UpperCAmelCase , **__UpperCAmelCase )
a = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
| style_context_codestyle: 26 | label: 0 |
__all__ = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| code_codestyle: 280 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a = None , **__a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
UpperCamelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
UpperCamelCase = len(self.sp_model ) - 1
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case_ (self , __a , __a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ (self , __a , __a = None , __a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case_ (self , __a , __a = None ) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ (self ) -> Any:
return len(self.sp_model )
def snake_case_ (self ) -> int:
UpperCamelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ (self , __a ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def snake_case_ (self , __a ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(__a )
return spm_id if spm_id else self.unk_token_id
def snake_case_ (self , __a ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__a )
def snake_case_ (self , __a ) -> Union[str, Any]:
UpperCamelCase = []
UpperCamelCase = ""
UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
UpperCamelCase = True
UpperCamelCase = []
else:
current_sub_tokens.append(__a )
UpperCamelCase = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def __getstate__(self ) -> str:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__(self , __a ) -> Optional[int]:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ (self , __a , __a = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
| style_context_codestyle: 153 | label: 0 |
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| code_codestyle: 138 |
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| style_context_codestyle: 138 | label: 1 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : an empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| code_codestyle: 47 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        cross_entropy_weight: float = 1.0,
        dice_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| style_context_codestyle: 114 | label: 0 |
import os


def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
| code_codestyle: 358 |
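On the four-row example triangle from the Project Euler 18 statement, the accumulation above turns each row into running best-path sums (a hand trace, comments only):

#     3           [3]
#    7 4      ->  [10, 7]
#   2 4 6     ->  [12, 14, 13]
#  8 5 9 3    ->  [20, 19, 23, 16]   -> max(a[-1]) = 23, the known answer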
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
'''simple docstring'''
def __init__( self : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any]=7 ,lowerCamelCase__ : Optional[Any]=3 ,lowerCamelCase__ : Union[str, Any]=18 ,lowerCamelCase__ : Optional[int]=30 ,lowerCamelCase__ : int=400 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Optional[int]=[0.5, 0.5, 0.5] ,lowerCamelCase__ : Any=[0.5, 0.5, 0.5] ,) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
__snake_case : Tuple = DPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""size""" ) )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase__ ,torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
| style_context_codestyle: 193 | label: 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : int = '''mgp-str'''
def __init__( self , lowerCAmelCase__=[3_2, 1_2_8] , lowerCAmelCase__=4 , lowerCAmelCase__=3 , lowerCAmelCase__=2_7 , lowerCAmelCase__=3_8 , lowerCAmelCase__=5_0_2_5_7 , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=4.0 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=False , lowerCAmelCase__=0.02 , **lowerCAmelCase__ , ):
super().__init__(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = max_token_length
__SCREAMING_SNAKE_CASE = num_character_labels
__SCREAMING_SNAKE_CASE = num_bpe_labels
__SCREAMING_SNAKE_CASE = num_wordpiece_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = mlp_ratio
__SCREAMING_SNAKE_CASE = distilled
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = drop_rate
__SCREAMING_SNAKE_CASE = qkv_bias
__SCREAMING_SNAKE_CASE = attn_drop_rate
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = output_aa_attentions
__SCREAMING_SNAKE_CASE = initializer_range
| code_codestyle: 100 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ = 100 ):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| style_context_codestyle: 100 | label: 1 |
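The loop above can be replaced by closed forms for both sums, which removes the O(n) scan:

# (n(n+1)/2)^2 - n(n+1)(2n+1)/6
# For n = 100: 5050^2 - 338350 = 25502500 - 338350 = 25164150,
# the published Project Euler 6 answer.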
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A = """pt"""
elif is_tf_available():
__A = """tf"""
else:
__A = """jax"""
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
def snake_case ( self ):
'''simple docstring'''
super().setUp()
tokenizer = ByT5Tokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case ( self ):
'''simple docstring'''
return ByT5Tokenizer.from_pretrained('google/byt5-small' )
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=20 , __UpperCAmelCase=5 ):
'''simple docstring'''
lowerCAmelCase__ :Dict = []
for i in range(len(__UpperCAmelCase ) ):
try:
lowerCAmelCase__ :Union[str, Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=__UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase__ :str = list(filter(lambda __UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , __UpperCAmelCase ) )
lowerCAmelCase__ :Tuple = list(filter(lambda __UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__UpperCAmelCase ) , __UpperCAmelCase ) )
if max_length is not None and len(__UpperCAmelCase ) > max_length:
lowerCAmelCase__ :Optional[int] = toks[:max_length]
if min_length is not None and len(__UpperCAmelCase ) < min_length and len(__UpperCAmelCase ) > 0:
while len(__UpperCAmelCase ) < min_length:
lowerCAmelCase__ :List[str] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ :int = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ :Optional[Any] = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
if " " not in output_txt and len(__UpperCAmelCase ) > 1:
lowerCAmelCase__ :int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__UpperCAmelCase )
)
if with_prefix_space:
lowerCAmelCase__ :Dict = ' ' + output_txt
lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
return output_txt, output_ids
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.ta_base_tokenizer
lowerCAmelCase__ :str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
lowerCAmelCase__ :Any = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.ta_base_tokenizer
lowerCAmelCase__ :int = 'Unicode €.'
lowerCAmelCase__ :Optional[int] = tokenizer(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , __UpperCAmelCase )
# decoding
lowerCAmelCase__ :Dict = tokenizer.decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , 'Unicode €.</s>' )
lowerCAmelCase__ :Tuple = tokenizer('e è é ê ë' )
lowerCAmelCase__ :Any = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , __UpperCAmelCase )
# decoding
lowerCAmelCase__ :List[Any] = tokenizer.decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.ta_base_tokenizer
lowerCAmelCase__ :Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCAmelCase__ :Dict = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
if FRAMEWORK != "jax":
lowerCAmelCase__ :Dict = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase__ :Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch['input_ids'][0])
        self.assertEqual(expected_tgt_tokens, batch['labels'][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )

                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == '')
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    # We need a different implementation of this test, because this tokenizer doesn't have a vocabulary
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)

                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), None)

                    setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)

                setattr(tokenizer, 'additional_special_tokens_ids', [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])

                setattr(tokenizer, 'additional_special_tokens_ids', [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [token_id_to_test_setters])
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
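    # Note (illustrative, not part of the original file): with the defaults above
    # (embed_dim=96, depths=[2, 2, 6, 2]) the derived hidden_size is 96 * 2 ** 3 = 768,
    # i.e. the channel width of the final Swin stage.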
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
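# Worked example (illustrative, not part of the original file): for n = 10 the sum of
# squares is 385 and the square of the sum is 55 ** 2 = 3025, so solution(10) == 2640.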
if __name__ == "__main__":
print(f'{solution() = }')
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
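    # Worked example of the crop_pct arithmetic above (illustrative values, not part of
    # the original file): with shortest_edge=224 and crop_pct=224/256, the shortest side
    # is first resized to int(224 / (224 / 256)) = 256 and the image is then
    # center-cropped to 224x224 -- the classic "resize to 256, crop to 224" recipe.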
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")

        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("""crop_pct must be specified if size < 384.""")

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
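# Minimal usage sketch (illustrative, not part of the original file): an instance of the
# processor above is typically called as
#     processor = ConvNextImageProcessor(size={"shortest_edge": 384})
#     batch = processor(images=pil_image, return_tensors="pt")
# which returns a BatchFeature whose "pixel_values" tensor feeds the vision model.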
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""")

        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_a = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_text_dual_encoder'] = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_text_dual_encoder'] = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_text_dual_encoder'] = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
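# Shape note for the qkv split above (illustrative, not part of the original file): a
# fused qkv tensor of shape (3 * C, ...) is reshaped to (num_heads, 3 * C // num_heads, ...)
# and split along dim=1 into equal query/key/value chunks before being written back.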
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key]
        attentions = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key]

        if f'''input_blocks.{i}.0.op.weight''' in checkpoint:
            new_checkpoint[f'''down_blocks.{block_id}.downsamplers.0.conv.weight'''] = checkpoint[
                f'''input_blocks.{i}.0.op.weight'''
            ]
            new_checkpoint[f'''down_blocks.{block_id}.downsamplers.0.conv.bias'''] = checkpoint[
                f'''input_blocks.{i}.0.op.bias'''
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f'''input_blocks.{i}.0''', "new": f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f'''input_blocks.{i}.1''',
                "new": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
            }
            to_split = {
                f'''input_blocks.{i}.1.qkv.bias''': {
                    "key": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                    "query": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                    "value": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                },
                f'''input_blocks.{i}.1.qkv.weight''': {
                    "key": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                    "query": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                    "value": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config,)

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key]
            attentions = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f'''output_blocks.{i}.0''', "new": f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f'''up_blocks.{block_id}.upsamplers.0.conv.weight'''] = checkpoint[
                    f'''output_blocks.{i}.{index}.conv.weight'''
                ]
                new_checkpoint[f'''up_blocks.{block_id}.upsamplers.0.conv.bias'''] = checkpoint[
                    f'''output_blocks.{i}.{index}.conv.bias'''
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f'''output_blocks.{i}.1''',
                    "new": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
                }
                to_split = {
                    f'''output_blocks.{i}.1.qkv.bias''': {
                        "key": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                        "query": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                        "value": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                    },
                    f'''output_blocks.{i}.1.qkv.weight''': {
                        "key": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                        "query": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                        "value": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config,)
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
import random
def _partition(data, pivot) -> tuple:
    """Three-way partition of data around pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
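# Minimal usage sketch (illustrative, not part of the original file): the median is the
# middle order statistic, i.e. quick_select(items, len(items) // 2).
if __name__ == "__main__":
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 3))  # 7: the 4th-smallest element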
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
_A : Tuple = LDMTextToImagePipeline
_A : List[str] = TEXT_TO_IMAGE_PARAMS - {
'''negative_prompt''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
'''prompt_embeds''',
}
_A : Optional[int] = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_A : str = TEXT_TO_IMAGE_BATCH_PARAMS
_A : List[str] = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vqvae''': vae,
            '''bert''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference_text2img(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''').to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1E-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 50,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''').to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''')
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1E-3
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert between energy units via their joule equivalents in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {", ".join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
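# Worked example (illustrative, not part of the original file):
# energy_conversion("kilowatthour", "joule", 1.0) == 1.0 * 3_600_000 / 1.0 == 3_600_000.0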
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print('Loading config file...')

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, 'r') as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_path, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith('imagenet1k_'):
        config.num_labels = 1_0_0_0
        if int(task_name.strip().split('_')[-1]) == 3_8_4:
            config.image_size = 3_8_4
        else:
            config.image_size = 2_5_6
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_'):
        config.num_labels = 2_1_0_0_0
        if int(task_name.strip().split('_')[-1]) == 3_8_4:
            config.image_size = 3_8_4
        else:
            config.image_size = 2_5_6
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_'):
        config.num_labels = 1_5_1
        config.image_size = 5_1_2
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_'):
        config.num_labels = 2_1
        config.image_size = 5_1_2
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, 'model.classification.name', -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, 'model.classification.mitv2.width_multiplier', 1.0)
    assert (
        getattr(orig_config, 'model.classification.mitv2.attn_norm_layer', -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, 'model.classification.activation.name', 'swish')
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, 'model.segmentation.output_stride', 1_6)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_rates', [1_2, 2_4, 3_6])
            config.aspp_out_channels = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_out_channels', 5_1_2)
            config.aspp_dropout_prob = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_dropout', 0.1)

    # id2label
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace('.block.', '.')
        if ".conv." in k:
            k_new = k_new.replace('.conv.', '.convolution.')
        if ".norm." in k:
            k_new = k_new.replace('.norm.', '.normalization.')

        if "conv_1." in k:
            k_new = k_new.replace('conv_1.', F'''{model_prefix}conv_stem.''')
        for i in [1, 2]:
            if F'''layer_{i}.''' in k:
                k_new = k_new.replace(F'''layer_{i}.''', F'''{model_prefix}encoder.layer.{i-1}.layer.''')
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.', '.expand_1x1.')
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.', '.reduce_1x1.')

        for i in [3, 4, 5]:
            if F'''layer_{i}.0.''' in k:
                k_new = k_new.replace(F'''layer_{i}.0.''', F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
            if F'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(F'''layer_{i}.1.local_rep.0.''', F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
            if F'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(F'''layer_{i}.1.local_rep.1.''', F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if F'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        F'''layer_{i}.1.global_rep.{j}.''', F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
            if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
                k_new = k_new.replace(
                    F'''layer_{i}.1.global_rep.{j+1}.''', F'''{model_prefix}encoder.layer.{i-1}.layernorm.''')

            if F'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(F'''layer_{i}.1.conv_proj.''', F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.', 'layernorm_before.')
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.', 'attention.')
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.', 'layernorm_after.')
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.', 'ffn.conv1.')
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.', 'ffn.conv2.')
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.', 'classifier.')

        if "seg_head." in k:
            k_new = k_new.replace('seg_head.', 'segmentation_head.')
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.', '.')
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.', '.')

        rename_keys.append((k, k_new))
    return rename_keys
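# Example of the renaming above (illustrative, not part of the original file): an original
# key 'layer_3.1.local_rep.0.conv.weight' becomes
# 'mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight' after the '.conv.' and
# 'layer_{i}.1.local_rep.0.' substitutions are applied in sequence.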
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the segmentation auxiliary head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.'):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    # load huggingface model
    if task_name.startswith('ade20k_') or task_name.startswith('voc_'):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 3_2)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith('imagenet'):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
        if task_name.startswith('imagenet1k_256') and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
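# Note on the helper above (illustrative, not part of the original file): copying the
# embedding matrix into a bias-free nn.Linear ties the LM head to the shared embeddings,
# so output logits are effectively hidden_states @ emb.weight.T.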
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args'] or m2m_100['cfg']['model']
    state_dict = m2m_100['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )

    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase : str = parser.parse_args()
_UpperCAmelCase : Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
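# Example invocation (the script name and paths are illustrative, not from the source):
#   python convert_m2m100_checkpoint.py model.pt ./m2m100-hf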
| 50
| 0
|
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Check that each custom file shipped with the package is actually present."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 77
|
"""simple docstring"""
from math import factorial
def solution(num: int = 1_0_0) -> int:
    """Return the sum of the digits in the decimal expansion of num!."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
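# With the default input of 100, solution() returns 648, the digit sum of 100!.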
| 77
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-50-one-to-many-mmt': (
            'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-50-one-to-many-mmt': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']


class lowerCamelCase__( PreTrainedTokenizer):
UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Dict = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Any=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="</s>" , UpperCamelCase_: Dict="<s>" , UpperCamelCase_: List[str]="<unk>" , UpperCamelCase_: str="<pad>" , UpperCamelCase_: Union[str, Any]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: str , ):
__lowerCamelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCamelCase = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
__lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowerCamelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowerCamelCase = 1
__lowerCamelCase = len(self.sp_model )
__lowerCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase_ )
}
__lowerCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
__lowerCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowerCamelCase = src_lang if src_lang is not None else """en_XX"""
__lowerCamelCase = self.lang_code_to_id[self._src_lang]
__lowerCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase__ ( self: int ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase__ ( self: List[str] ):
return self._src_lang
@src_lang.setter
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: str ):
__lowerCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self: int ):
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self: str , UpperCamelCase_: Dict ):
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str ):
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCamelCase = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def lowerCAmelCase__ ( self: Dict , tokens: List[str] ):
        """Converts a sequence of sub-word tokens into a single string."""
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def lowerCAmelCase__ ( self: List[str] , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
__lowerCamelCase = [1] * len(self.prefix_tokens )
__lowerCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] , UpperCamelCase_: Optional[str] , **UpperCamelCase_: List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__lowerCamelCase = src_lang
__lowerCamelCase = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
__lowerCamelCase = self.convert_tokens_to_ids(UpperCAmelCase_ )
__lowerCamelCase = tgt_lang_id
return inputs
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: str = "en_XX" , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "ro_RO" , **UpperCamelCase_: Optional[Any] , ):
__lowerCamelCase = src_lang
__lowerCamelCase = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__ ( self: List[str] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = self.lang_code_to_id[src_lang]
__lowerCamelCase = [self.cur_lang_code_id]
__lowerCamelCase = [self.eos_token_id]
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: str ):
__lowerCamelCase = self.lang_code_to_id[tgt_lang]
__lowerCamelCase = [self.cur_lang_code_id]
__lowerCamelCase = [self.eos_token_id]
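# A minimal usage sketch of the upstream tokenizer this class mirrors (MBart50Tokenizer);
# the checkpoint id and input text are illustrative:
#   tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")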
| 12
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path ,pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path ,output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path ,output_loading_info=True )
    special_keys = ["""key_proj""", """value_proj""", """query_proj"""]
    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
        attributes = key.split(""".""" )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model ,old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model ,attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F'{attribute} is initialized.' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F'{attribute} is initialized' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model ,"""in_proj_weight""" ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model ,attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model ,attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model ,old_attribute ):
                        raise ValueError(F'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model ,old_attribute )
        if not is_key_init:
            raise ValueError(F'{key} was not correctly initialized!' )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 0
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def SCREAMING_SNAKE_CASE_ ( matrix : list[list[float]] )-> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
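# Quick sanity check (the 2x2 example has determinant 1, so entries stay integral):
#   SCREAMING_SNAKE_CASE_([[2.0, 5.0], [1.0, 3.0]]) == [[3.0, -5.0], [-1.0, 2.0]]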
| 363
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
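# Euclid's formula with coprime m > n of opposite parity generates every primitive
# Pythagorean triple (m**2 - n**2, 2*m*n, m**2 + n**2) exactly once; its perimeter is
# 2*m*(m + n), so counting multiples of each primitive perimeter up to the limit tells
# us which total lengths admit exactly one integer-sided right triangle.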
def solution(limit : int = 1_500_000 )-> int:
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'{solution() = }')
| 80
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __A ( PretrainedConfig ):
    '''simple docstring'''

    model_type = "audio-spectrogram-transformer"

    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=1_6 , qkv_bias=True , frequency_stride=1_0 , time_stride=1_0 , max_length=1_0_2_4 , num_mel_bins=1_2_8 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
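# A minimal usage sketch (upstream this class is ASTConfig; the values shown are illustrative):
#   config = __A(patch_size=1_6, num_mel_bins=1_2_8)
#   assert config.hidden_size == 7_6_8  # default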
| 209
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __lowerCAmelCase ( unittest.TestCase ):
def UpperCamelCase ( self : int ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_UpperCAmelCase = Vector()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case__ ) , "(0,0,0,0,0,1)" )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case__ ) , 4 )
def UpperCamelCase ( self : int ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2] )
_UpperCAmelCase = Vector([1, 2, 3, 4, 5] )
_UpperCAmelCase = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_UpperCAmelCase = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCamelCase ( self : str ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([2, -1, 4] ) # for test of dot product
_UpperCAmelCase = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case__ , snake_case__ ) ) , "(3,4,7)" )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 0, 0, 0, 0, 0] )
_UpperCAmelCase = x.copy()
self.assertEqual(str(snake_case__ ) , str(snake_case__ ) )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case__ ) , "(0,1,0)" )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case__ ) )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case__ , snake_case__ ) )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case__ , snake_case__ ) )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_UpperCAmelCase = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(snake_case__ ) )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def UpperCamelCase ( self : str ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 133
| 0
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _snake_case (PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__( self ,vocab_size=25_01_12 ,d_model=5_12 ,d_kv=64 ,d_ff=10_24 ,num_layers=8 ,num_decoder_layers=None ,num_heads=6 ,relative_attention_num_buckets=32 ,relative_attention_max_distance=1_28 ,dropout_rate=0.1 ,layer_norm_epsilon=1E-6 ,initializer_factor=1.0 ,feed_forward_proj="gated-gelu" ,is_encoder_decoder=True ,use_cache=True ,tokenizer_class="T5Tokenizer" ,tie_word_embeddings=True ,pad_token_id=0 ,eos_token_id=1 ,decoder_start_token_id=0 ,**kwargs ,):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder ,tokenizer_class=tokenizer_class ,tie_word_embeddings=tie_word_embeddings ,pad_token_id=pad_token_id ,eos_token_id=eos_token_id ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,)
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size( self ):
        return self.d_model

    @property
    def num_attention_heads( self ):
        return self.num_heads

    @property
    def num_hidden_layers( self ):
        return self.num_layers


class _snake_case (OnnxSeqaSeqConfigWithPast):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ):
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction="inputs" )
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ):
        return 13

    @property
    def atol_for_validation( self ):
        return 5E-4
| 67
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly( poly: Sequence[float] , x: float ) -> float:
    """Evaluate a polynomial, with coefficients given in ascending order of power, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly ) )


def horner( poly: Sequence[float] , x: float ) -> float:
    """Evaluate the same polynomial with Horner's method: one pass, no exponentiation."""
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
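# Both calls print 79800.0 for these inputs: 5*10**2 + 9.3*10**3 + 7*10**4.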
| 67
| 1
|
'''simple docstring'''
class FlowNetwork:
    def __init__( self , graph , sources , sinks ) ->None:
        '''simple docstring'''
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources , sinks )
        self.verticesCount = len(graph )
        self.maximum_flow_algorithm = None

    def _normalize_graph( self , sources , sinks ) ->None:
        '''simple docstring'''
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self ) ->int:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm( self , algorithm ) ->None:
        '''simple docstring'''
        self.maximum_flow_algorithm = algorithm(self )


class FlowNetworkAlgorithmExecutor:
    def __init__( self , flow_network ) ->None:
        '''simple docstring'''
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticesCount
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute( self ) ->None:
        '''simple docstring'''
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm( self ) ->None:
'''simple docstring'''
pass
class MaximumFlowAlgorithmExecutor( FlowNetworkAlgorithmExecutor ):
    def __init__( self , flow_network ) ->None:
        '''simple docstring'''
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow( self ) ->int:
        '''simple docstring'''
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!" )
        return self.maximum_flow
class PushRelabelExecutor( MaximumFlowAlgorithmExecutor ):
    def __init__( self , flow_network ) ->None:
        '''simple docstring'''
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm( self ) ->None:
        '''simple docstring'''
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self , vertex_index ) ->None:
        '''simple docstring'''
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )

    def push( self , from_index , to_index ) ->None:
        '''simple docstring'''
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel( self , vertex_index ) ->None:
        '''simple docstring'''
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
lowerCamelCase__ = [0]
lowerCamelCase__ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowerCamelCase__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowerCamelCase__ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowerCamelCase__ = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
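# For the 4-vertex graph above, the only route from source 0 to sink 3 is
# 0 -> 1 -> 2 -> 3, so the expected maximum flow is min(7, 6, 8) = 6.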
| 234
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowerCamelCase__ = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class lowerCAmelCase__ ( PretrainedConfig ):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self , backbone_config=None , num_queries=9_00 , max_position_embeddings=20_48 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=10_24 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=3_00 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.2_5 , **kwargs , ) ->None:
        '''simple docstring'''
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads( self ) ->int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) ->int:
        '''simple docstring'''
        return self.d_model

    def to_dict( self ) ->dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 234
| 1
|
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image ) -> Image:
    '''simple docstring'''
    height , width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width ):
        for j in range(height ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # pixels strictly brighter than the global mean become white, all others black
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 2_5_5 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 32
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = GPTSanJapaneseTokenizer
snake_case__ : int = False
snake_case__ : Tuple = {'do_clean_text': False, 'add_prefix_space': False}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowercase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowercase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.get_input_output_texts(__lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
| 32
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
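# With this lazy-module pattern, importing the package stays cheap: the torch-backed
# symbols listed in `_import_structure` are only imported on first attribute access.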
| 65
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
@slow
    def test_image_classification( self ) -> None:
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
        model.to(torch_device )
        from datasets import load_dataset

        dataset = load_dataset("""nielsr/rvlcdip-demo""" )
        image = dataset["""train"""][0]["""image"""].convert("""RGB""" )
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 1_6) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
| 165
| 0
|
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = ["image_processor", "tokenizer"]
a__ : List[str] = "AutoImageProcessor"
a__ : Tuple = "AutoTokenizer"
def __init__( self : int , _lowercase : Tuple=None , _lowercase : List[Any]=None , **_lowercase : int ):
__UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _lowercase , )
__UpperCAmelCase = kwargs.pop('''feature_extractor''' )
__UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.image_processor
__UpperCAmelCase = False
def __call__( self : Dict , *_lowercase : Optional[Any] , **_lowercase : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''images''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
__UpperCAmelCase = self.image_processor(_lowercase , *_lowercase , **_lowercase )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if text is None:
return inputs
elif images is None:
return encodings
else:
__UpperCAmelCase = encodings['''input_ids''']
return inputs
def a ( self : Optional[int] , *_lowercase : Optional[Any] , **_lowercase : Any ):
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Tuple , *_lowercase : List[str] , **_lowercase : Tuple ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
@contextmanager
def a ( self : str ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
__UpperCAmelCase = True
__UpperCAmelCase = self.tokenizer
yield
__UpperCAmelCase = self.image_processor
__UpperCAmelCase = False
def a ( self : Optional[int] , _lowercase : Optional[int] , _lowercase : Dict=False , _lowercase : List[str]=None ):
if added_vocab is None:
__UpperCAmelCase = self.tokenizer.get_added_vocab()
__UpperCAmelCase = {}
while tokens:
__UpperCAmelCase = re.search(r'''<s_(.*?)>''' , _lowercase , re.IGNORECASE )
if start_token is None:
break
__UpperCAmelCase = start_token.group(1 )
__UpperCAmelCase = re.search(rF'''</s_{key}>''' , _lowercase , re.IGNORECASE )
__UpperCAmelCase = start_token.group()
if end_token is None:
__UpperCAmelCase = tokens.replace(_lowercase , '''''' )
else:
__UpperCAmelCase = end_token.group()
__UpperCAmelCase = re.escape(_lowercase )
__UpperCAmelCase = re.escape(_lowercase )
__UpperCAmelCase = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , _lowercase , re.IGNORECASE )
if content is not None:
__UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__UpperCAmelCase = self.tokenajson(_lowercase , is_inner_value=_lowercase , added_vocab=_lowercase )
if value:
if len(_lowercase ) == 1:
__UpperCAmelCase = value[0]
__UpperCAmelCase = value
else: # leaf nodes
__UpperCAmelCase = []
for leaf in content.split(r'''<sep/>''' ):
__UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(_lowercase )
if len(output[key] ) == 1:
__UpperCAmelCase = output[key][0]
__UpperCAmelCase = tokens[tokens.find(_lowercase ) + len(_lowercase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_lowercase , added_vocab=_lowercase )
if len(_lowercase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def a ( self : Tuple ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _lowercase , )
return self.image_processor_class
@property
def a ( self : Tuple ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _lowercase , )
return self.image_processor
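# A hedged, standalone sketch of the nested-marker parsing idea the `tokenajson`
# (token-to-JSON) method above implements: Donut-style outputs wrap each field in
# <s_key> ... </s_key>, and nested markers become nested dicts. All names below
# are illustrative; this is not the class's own code and it skips <sep/> lists.
import re

def parse_donut_tokens(tokens: str) -> dict:
    output = {}
    while tokens:
        start = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
        if start is None:
            break
        key = start.group(1)
        end = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
        if end is None:
            break
        content = re.search(
            rf"{re.escape(start.group())}(.*?){re.escape(end.group())}", tokens, re.DOTALL
        )
        if content is not None:
            value = content.group(1).strip()
            # recurse on nested markers, otherwise keep the raw leaf string
            output[key] = parse_donut_tokens(value) if "<s_" in value else value
        tokens = tokens[end.end():]
    return output

assert parse_donut_tokens("<s_menu><s_name>latte</s_name></s_menu>") == {"menu": {"name": "latte"}}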
| 367
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_lowercase : Any = True
except ImportError:
_lowercase : str = False
try:
from torch.hub import _get_torch_home
_lowercase : Any = _get_torch_home()
except ImportError:
_lowercase : Dict = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
_lowercase : Tuple = os.path.join(torch_cache_home, 'transformers')
_lowercase : int = 'https://cdn.huggingface.co'
_lowercase : Union[str, Any] = 'https://s3.amazonaws.com/models.huggingface.co/bert'
_lowercase : str = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
_lowercase : str = os.path.join(PATH, 'config.yaml')
_lowercase : int = os.path.join(PATH, 'attributes.txt')
_lowercase : List[str] = os.path.join(PATH, 'objects.txt')
_lowercase : Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
_lowercase : int = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
_lowercase : Dict = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
_lowercase : Union[str, Any] = 'pytorch_model.bin'
_lowercase : List[str] = 'config.yaml'
def lowercase__ ( snake_case_ :int=OBJECTS , snake_case_ :Optional[int]=ATTRIBUTES ):
__UpperCAmelCase = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__UpperCAmelCase = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase__ ( snake_case_ :List[Any] ):
__UpperCAmelCase = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__UpperCAmelCase = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__UpperCAmelCase = torch.tensor(snake_case_ )
else:
assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )
__UpperCAmelCase = v
return r
class _UpperCAmelCase :
a__ : Tuple = {}
def __init__( self : List[str] , _lowercase : dict , _lowercase : str = "root" , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = name
__UpperCAmelCase = level
__UpperCAmelCase = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase = copy.deepcopy(_lowercase )
__UpperCAmelCase = copy.deepcopy(_lowercase )
if isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = Config(_lowercase , name=_lowercase , level=level + 1 )
__UpperCAmelCase = v
setattr(self , _lowercase , _lowercase )
__UpperCAmelCase = d
def __repr__( self : Any ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Dict ):
__UpperCAmelCase = val
__UpperCAmelCase = val
__UpperCAmelCase = key.split('''.''' )
__UpperCAmelCase = len(_lowercase ) - 1
__UpperCAmelCase = self._pointer
if len(_lowercase ) > 1:
for i, l in enumerate(_lowercase ):
if hasattr(self , _lowercase ) and isinstance(getattr(self , _lowercase ) , _lowercase ):
setattr(getattr(self , _lowercase ) , '''.'''.join(levels[i:] ) , _lowercase )
if l == last_level:
__UpperCAmelCase = val
else:
__UpperCAmelCase = pointer[l]
def a ( self : int ):
return self._pointer
def a ( self : List[str] , _lowercase : Dict , _lowercase : str ):
with open(F'''{file_name}''' , '''w''' ) as stream:
dump(_lowercase , _lowercase )
def a ( self : int , _lowercase : Dict , _lowercase : Tuple ):
with open(F'''{file_name}''' , '''w''' ) as stream:
json.dump(_lowercase , _lowercase )
@staticmethod
def a ( _lowercase : str ):
with open(_lowercase ) as stream:
__UpperCAmelCase = load(_lowercase , Loader=_lowercase )
return data
def __str__( self : Dict ):
__UpperCAmelCase = ''' '''
if self._name != "root":
__UpperCAmelCase = F'''{t * (self._level-1)}{self._name}:\n'''
else:
__UpperCAmelCase = ''''''
__UpperCAmelCase = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_lowercase , _lowercase ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(_lowercase ).__name__})\n'''
__UpperCAmelCase = level
return r[:-1]
@classmethod
def a ( cls : str , _lowercase : str , **_lowercase : Any ):
__UpperCAmelCase , __UpperCAmelCase = cls.get_config_dict(_lowercase , **_lowercase )
return cls(_lowercase )
@classmethod
def a ( cls : Any , _lowercase : str , **_lowercase : str ):
__UpperCAmelCase = kwargs.pop('''cache_dir''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''force_download''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''resume_download''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''proxies''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''local_files_only''' , _lowercase )
if os.path.isdir(_lowercase ):
__UpperCAmelCase = os.path.join(_lowercase , _lowercase )
elif os.path.isfile(_lowercase ) or is_remote_url(_lowercase ):
__UpperCAmelCase = pretrained_model_name_or_path
else:
__UpperCAmelCase = hf_bucket_url(_lowercase , filename=_lowercase , use_cdn=_lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase = cached_path(
_lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase = Config.load_yaml(_lowercase )
except EnvironmentError:
__UpperCAmelCase = '''Can\'t load config for'''
raise EnvironmentError(_lowercase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(_lowercase ), kwargs
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = torch.load('''dump.pt''' , map_location=in_tensor.device )
__UpperCAmelCase = in_tensor.numpy()
__UpperCAmelCase = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
F'''{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def lowercase__ ( snake_case_ :str , snake_case_ :str , snake_case_ :List[str]=True ):
__UpperCAmelCase = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase = '''/''' not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def lowercase__ ( snake_case_ :str , snake_case_ :Tuple , snake_case_ :List[str]=None , snake_case_ :List[str]=0 , snake_case_ :List[Any]=None , ):
__UpperCAmelCase = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__UpperCAmelCase = {'''user-agent''': ua}
if resume_size > 0:
__UpperCAmelCase = '''bytes=%d-''' % (resume_size,)
__UpperCAmelCase = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase = response.headers.get('''Content-Length''' )
__UpperCAmelCase = resume_size + int(snake_case_ ) if content_length is not None else None
__UpperCAmelCase = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :str=None , snake_case_ :Optional[int]=False , snake_case_ :List[Any]=None , snake_case_ :List[Any]=10 , snake_case_ :Optional[int]=False , snake_case_ :List[str]=None , snake_case_ :Union[str, Any]=False , ):
if cache_dir is None:
__UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__UpperCAmelCase = None
if not local_files_only:
try:
__UpperCAmelCase = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__UpperCAmelCase = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__UpperCAmelCase = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__UpperCAmelCase = _resumable_file_manager
if os.path.exists(snake_case_ ):
__UpperCAmelCase = os.stat(snake_case_ ).st_size
else:
__UpperCAmelCase = 0
else:
__UpperCAmelCase = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__UpperCAmelCase = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__UpperCAmelCase = {'''url''': url, '''etag''': etag}
__UpperCAmelCase = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def lowercase__ ( snake_case_ :int , snake_case_ :str=None ):
__UpperCAmelCase = url.encode('''utf-8''' )
__UpperCAmelCase = shaaaa(snake_case_ )
__UpperCAmelCase = url_hash.hexdigest()
if etag:
__UpperCAmelCase = etag.encode('''utf-8''' )
__UpperCAmelCase = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def lowercase__ ( snake_case_ :Dict , snake_case_ :List[Any]=None , snake_case_ :List[Any]=False , snake_case_ :Optional[int]=None , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=None , snake_case_ :Any=False , snake_case_ :int=False , snake_case_ :Optional[int]=False , ):
if cache_dir is None:
__UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__UpperCAmelCase = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase = os.path.split(snake_case_ )
__UpperCAmelCase = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__UpperCAmelCase = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any]="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__UpperCAmelCase = eval(f.read() )
else:
__UpperCAmelCase = requests.get(snake_case_ )
try:
__UpperCAmelCase = req.json()
except Exception:
__UpperCAmelCase = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase = eval(snake_case_ )
except Exception:
__UpperCAmelCase = data.split('''\n''' )
req.close()
return data
def lowercase__ ( snake_case_ :Union[str, Any] ):
__UpperCAmelCase = requests.get(snake_case_ )
__UpperCAmelCase = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__UpperCAmelCase = pkl.load(snake_case_ )
__UpperCAmelCase = weights.pop('''model''' )
__UpperCAmelCase = {}
for k, v in model.items():
__UpperCAmelCase = torch.from_numpy(snake_case_ )
if "running_var" in k:
__UpperCAmelCase = torch.tensor([0] )
__UpperCAmelCase = k.replace('''running_var''' , '''num_batches_tracked''' )
__UpperCAmelCase = zero
return new
def lowercase__ ( ):
print(F'''{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__UpperCAmelCase = cva.imread(snake_case_ )
else:
__UpperCAmelCase = get_image_from_url(snake_case_ )
assert img is not None, F'''could not connect to: {im}'''
__UpperCAmelCase = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase = img[:, :, ::-1]
return img
def lowercase__ ( snake_case_ :Any , snake_case_ :int=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
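# A hedged, self-contained sketch of the cache-naming scheme `url_to_filename`
# above encodes: hash the URL, append a hash of the ETag when one is known (so a
# changed remote file maps to a fresh cache entry), and keep a .h5 suffix visible.
# Using sha256 here is an assumption; the obfuscated import above hides the exact hash.
from hashlib import sha256
from typing import Optional

def demo_cache_filename(url: str, etag: Optional[str] = None) -> str:
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename

assert demo_cache_filename("https://example.com/m.bin") != demo_cache_filename("https://example.com/m.bin", etag="v2")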
| 86
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = StableDiffusionXLImgaImgPipeline
UpperCAmelCase_ : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
UpperCAmelCase_ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase_ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
UpperCAmelCase : Union[str, Any] = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
UpperCAmelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=32 , )
UpperCAmelCase : int = CLIPTextModel(lowerCAmelCase_ )
UpperCAmelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = CLIPTextModelWithProjection(lowerCAmelCase_ )
UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCAmelCase_ ( self : str , lowercase_ : Optional[int] , lowercase_ : Union[str, Any]=0 ) -> Optional[Any]:
UpperCAmelCase : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = image / 2 + 0.5
if str(lowerCAmelCase_ ).startswith('mps' ):
UpperCAmelCase : Any = torch.manual_seed(lowerCAmelCase_ )
else:
UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
UpperCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
UpperCAmelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase : Optional[int] = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase : Any = sd_pipe(**lowerCAmelCase_ ).images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Optional[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : int ) -> Any:
pass
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
UpperCAmelCase : Tuple = self.get_dummy_components()
UpperCAmelCase : int = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase_ )
UpperCAmelCase : int = sd_pipe.to(lowerCAmelCase_ )
UpperCAmelCase : int = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# forward without prompt embeds
UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase : Optional[int] = 3 * ['this is a negative prompt']
UpperCAmelCase : List[Any] = negative_prompt
UpperCAmelCase : List[Any] = 3 * [inputs['prompt']]
UpperCAmelCase : List[str] = sd_pipe(**lowerCAmelCase_ )
UpperCAmelCase : Any = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCAmelCase : List[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = 3 * ['this is a negative prompt']
UpperCAmelCase : List[Any] = 3 * [inputs.pop('prompt' )]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = sd_pipe.encode_prompt(lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = sd_pipe(
**lowerCAmelCase_ , prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , pooled_prompt_embeds=lowerCAmelCase_ , negative_pooled_prompt_embeds=lowerCAmelCase_ , )
UpperCAmelCase : Dict = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : int , lowercase_ : str , lowercase_ : List[str]="cpu" , lowercase_ : Optional[Any]=torch.floataa , lowercase_ : List[Any]=0 ) -> str:
UpperCAmelCase : List[str] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = np.random.RandomState(lowerCAmelCase_ ).standard_normal((1, 4, 64, 64) )
UpperCAmelCase : str = torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
UpperCAmelCase : Optional[int] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : Tuple ) -> int:
UpperCAmelCase : List[str] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = self.get_inputs(lowerCAmelCase_ )
UpperCAmelCase : int = pipe(**lowerCAmelCase_ ).images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : int = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
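# For context, a hedged miniature of the seeded-generator pattern these tests rely
# on: identical seeds must yield identical noise, which is what makes the
# expected-slice assertions above stable across runs. Illustrative only.
import torch

gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(2, 2, generator=gen_a), torch.randn(2, 2, generator=gen_b))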
| 151
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_snake_case : Tuple = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_snake_case : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a_ ( lowerCAmelCase_ : str ):
if "://" in dataset_path:
__lowerCAmelCase = dataset_path.split('://' )[1]
return dataset_path
def a_ ( lowerCAmelCase_ : fsspec.AbstractFileSystem ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def a_ ( lowerCAmelCase_ : fsspec.AbstractFileSystem, lowerCAmelCase_ : str, lowerCAmelCase_ : str ):
__lowerCAmelCase = not is_remote_filesystem(lowerCAmelCase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCAmelCase_ ), fs._strip_protocol(lowerCAmelCase_ ) )
else:
fs.mv(lowerCAmelCase_, lowerCAmelCase_, recursive=lowerCAmelCase_ )
def a_ ( ):
if hasattr(fsspec.asyn, 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = threading.Lock()
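# A hedged restatement of the path helper defined above (the sample reuses one
# obfuscated name for all of its functions): strip the protocol prefix so a URI
# and a plain path resolve the same way. The demo name is illustrative.
def demo_extract_path(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path

assert demo_extract_path("s3://bucket/train.parquet") == "bucket/train.parquet"
assert demo_extract_path("relative/local/path") == "relative/local/path"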
| 284
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case : Any = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
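# A hedged toy version of the lazy-import mechanism this __init__ delegates to:
# names are declared up front in `_import_structure`, and the defining submodule
# is only imported when an attribute is first touched. `TinyLazyModule` is an
# illustration, not the actual `_LazyModule` implementation.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that actually defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        mod_name = self._attr_to_module.get(attr)
        if mod_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(mod_name), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value

m = TinyLazyModule("demo", {"json": ["dumps", "loads"]})
assert m.dumps({"a": 1}) == '{"a": 1}'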
| 134
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Any = []
__snake_case : Optional[Any] = []
__snake_case : List[Any] = []
for rt in rc.restypes:
__snake_case : Any = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
__snake_case : Tuple = {name: i for i, name in enumerate(__lowerCamelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 1_4 )
restype_atomaa_to_atomaa_list.append([0] * 3_7 )
restype_atomaa_mask_list.append([0.0] * 1_4 )
__snake_case : int = torch.tensor(
__lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
__snake_case : List[str] = torch.tensor(
__lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
__snake_case : Optional[Any] = torch.tensor(
__lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
__snake_case : Optional[int] = protein["aatype"].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
__snake_case : Optional[Any] = restype_atomaa_to_atomaa[protein_aatype]
__snake_case : Tuple = restype_atomaa_mask[protein_aatype]
__snake_case : Optional[Any] = residx_atomaa_mask
__snake_case : Union[str, Any] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
__snake_case : Dict = restype_atomaa_to_atomaa[protein_aatype]
__snake_case : Dict = residx_atomaa_to_atomaa.long()
# create the corresponding mask
__snake_case : List[str] = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein["aatype"].device )
for restype, restype_letter in enumerate(rc.restypes ):
__snake_case : List[str] = rc.restype_atoa[restype_letter]
__snake_case : List[Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
__snake_case : Union[str, Any] = rc.atom_order[atom_name]
__snake_case : str = 1
__snake_case : List[str] = restype_atomaa_mask[protein_aatype]
__snake_case : List[str] = residx_atomaa_mask
return protein
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : str = tree_map(lambda __lowerCamelCase : torch.tensor(__lowerCamelCase , device=batch["aatype"].device ) , __lowerCamelCase , np.ndarray )
__snake_case : str = tensor_tree_map(lambda __lowerCamelCase : np.array(__lowerCamelCase ) , make_atomaa_masks(__lowerCamelCase ) )
return out
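# A hedged toy of the gather trick above: per-residue-type index tables are built
# once, then indexed with the per-residue `aatype` ids so every residue picks up
# its own atom14 -> atom37 mapping in a single advanced-indexing step. Shapes and
# values are made up for illustration.
import torch

num_restypes, atoms_small, atoms_big = 3, 4, 6
lookup = torch.randint(0, atoms_big, (num_restypes, atoms_small))  # one row per residue type
aatype = torch.tensor([0, 2, 2, 1])  # one residue-type id per residue

per_residue = lookup[aatype]  # shape [num_residues, atoms_small]
assert per_residue.shape == (4, atoms_small)
assert torch.equal(per_residue[1], lookup[2])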
| 134
| 1
|
'''simple docstring'''
import copy
import re
class lowercase__ :
'''simple docstring'''
A_ : Optional[int] = 'hp'
A_ : str = {}
A_ : List[Any] = None
@classmethod
def UpperCAmelCase_ ( cls , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Any = prefix
_SCREAMING_SNAKE_CASE : int = defaults
cls.build_naming_info()
@staticmethod
def UpperCAmelCase_ ( __snake_case , __snake_case ):
if len(__snake_case ) == 0:
return ""
_SCREAMING_SNAKE_CASE : List[Any] = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: \'{word}\' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__snake_case ) + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_SCREAMING_SNAKE_CASE : Optional[int] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__snake_case ):
_SCREAMING_SNAKE_CASE : Optional[int] = """"""
while integer != 0:
_SCREAMING_SNAKE_CASE : List[Any] = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
_SCREAMING_SNAKE_CASE : Any = 0
while True:
_SCREAMING_SNAKE_CASE : Optional[Any] = word + """#""" + int_to_alphabetic(__snake_case )
if sword in info["reverse_short_word"]:
continue
else:
_SCREAMING_SNAKE_CASE : List[str] = sword
break
_SCREAMING_SNAKE_CASE : List[str] = short_word
_SCREAMING_SNAKE_CASE : Any = word
return short_word
@staticmethod
def UpperCAmelCase_ ( __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[int] = param_name.split("""_""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [TrialShortNamer.shortname_for_word(__snake_case , __snake_case ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_SCREAMING_SNAKE_CASE : Any = ["""""", """_"""]
for separator in separators:
_SCREAMING_SNAKE_CASE : Dict = separator.join(__snake_case )
if shortname not in info["reverse_short_param"]:
_SCREAMING_SNAKE_CASE : Any = shortname
_SCREAMING_SNAKE_CASE : Dict = param_name
return shortname
return param_name
@staticmethod
def UpperCAmelCase_ ( __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[Any] = TrialShortNamer.shortname_for_key(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : Tuple = short_name
_SCREAMING_SNAKE_CASE : Optional[int] = param_name
@classmethod
def UpperCAmelCase_ ( cls ):
if cls.NAMING_INFO is not None:
return
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
_SCREAMING_SNAKE_CASE : Union[str, Any] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = info
@classmethod
def UpperCAmelCase_ ( cls , __snake_case ):
cls.build_naming_info()
assert cls.PREFIX is not None
_SCREAMING_SNAKE_CASE : Union[str, Any] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_SCREAMING_SNAKE_CASE : Union[str, Any] = cls.NAMING_INFO["""short_param"""][k]
if isinstance(__snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : List[Any] = 1 if v else 0
_SCREAMING_SNAKE_CASE : Dict = """""" if isinstance(__snake_case , (int, float) ) else """-"""
_SCREAMING_SNAKE_CASE : Any = f"""{key}{sep}{v}"""
name.append(__snake_case )
return "_".join(__snake_case )
@classmethod
def UpperCAmelCase_ ( cls , __snake_case ):
_SCREAMING_SNAKE_CASE : Any = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_SCREAMING_SNAKE_CASE : List[Any] = []
else:
_SCREAMING_SNAKE_CASE : str = repr.split("""_""" )
_SCREAMING_SNAKE_CASE : Dict = {}
for value in values:
if "-" in value:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = value.split("""-""" )
else:
_SCREAMING_SNAKE_CASE : List[Any] = re.sub("""[0-9.]""" , """""" , __snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = float(re.sub("""[^0-9.]""" , """""" , __snake_case ) )
_SCREAMING_SNAKE_CASE : int = cls.NAMING_INFO["""reverse_short_param"""][p_k]
_SCREAMING_SNAKE_CASE : int = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_SCREAMING_SNAKE_CASE : Optional[Any] = cls.DEFAULTS[k]
return parameters
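# A hedged, standalone sketch of what the `shortname` classmethod above produces:
# each known hyperparameter gets a short alias, and only values that differ from
# the defaults show up in the run name. This mimics the behaviour; the aliases
# and defaults below are invented for the example.
defaults = {"learning_rate": 1e-4, "batch_size": 8}
aliases = {"learning_rate": "lr", "batch_size": "bs"}

def demo_short_run_name(prefix, params):
    parts = [prefix]
    for key, value in params.items():
        if value != defaults[key]:  # default values are omitted from the name
            parts.append(f"{aliases[key]}{value}")
    return "_".join(parts)

assert demo_short_run_name("hp", {"learning_rate": 1e-4, "batch_size": 32}) == "hp_bs32"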
| 200
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer']
_lowerCamelCase : Tuple = 'OwlViTImageProcessor'
_lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ):
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
A_ = kwargs.pop("feature_extractor" )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )):
A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )]
elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ):
A_ = []
# Maximum number of queries across batch
A_ = max([len(UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCAmelCase ) != max_num_queries:
A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase ))
A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
encodings.append(UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
A_ = BatchEncoding()
A_ = input_ids
A_ = attention_mask
if query_images is not None:
A_ = BatchEncoding()
A_ = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values
A_ = query_pixel_values
if images is not None:
A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ):
return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ):
return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ):
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def __A ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def __A ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor
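# A hedged, standalone illustration of the batching step in __call__ above: every
# sample's list of text queries is right-padded with " " to the longest list in
# the batch, so the per-sample encodings can later be concatenated along axis 0.
batch = [["a cat"], ["a dog", "a bird", "a fish"]]
max_num_queries = max(len(t) for t in batch)
padded = [t + [" "] * (max_num_queries - len(t)) for t in batch]
assert padded == [["a cat", " ", " "], ["a dog", "a bird", "a fish"]]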
| 312
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
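# Both this file and the RoFormer __init__ above gate imports on backend
# availability; a hedged miniature of that probe (using a stdlib module as the
# stand-in backend, since torch/tf/flax may not be installed):
import importlib.util

def demo_backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

extras = []
if demo_backend_available("json"):  # stand-in for is_torch_available() and friends
    extras.append("JsonBackedModel")
assert extras == ["JsonBackedModel"]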
| 370
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
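# A hedged miniature of the save/load round-trip these tests assert: the image
# processor's settings persist as JSON, so `to_json_string()` equality is the
# comparison the tests lean on. The keys mirror the dict defined in setUp above.
import json

image_processor_cfg = {"do_resize": True, "size": {"height": 18, "width": 18}, "do_normalize": True}
assert json.loads(json.dumps(image_processor_cfg)) == image_processor_cfg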
| 15
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __snake_case ( lowerCAmelCase ):
_a : Union[str, Any]= "microsoft/speecht5_tts"
_a : Tuple= (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_a : Dict= "text_reader"
_a : Optional[Any]= SpeechTaProcessor
_a : Tuple= SpeechTaForTextToSpeech
_a : Optional[int]= SpeechTaHifiGan
_a : Union[str, Any]= ["text"]
_a : Optional[int]= ["audio"]
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.post_processor is None:
lowercase : Any = """microsoft/speecht5_hifigan"""
super().setup()
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ):
'''simple docstring'''
lowercase : int = self.pre_processor(text=snake_case ,return_tensors="""pt""" ,truncation=snake_case )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
lowercase : Tuple = load_dataset("""Matthijs/cmu-arctic-xvectors""" ,split="""validation""" )
lowercase : List[str] = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(snake_case ).cpu().detach()
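# A hedged sketch of the shape contract in the encode step above (illustrative
# only; real use needs the SpeechT5 checkpoints and dataset): the 512-dim
# x-vector speaker embedding is unsqueezed to a batch of one before
# generate_speech consumes it.
import torch

xvector = torch.randn(512)  # stand-in for embeddings_dataset[i]["xvector"]
speaker_embeddings = xvector.unsqueeze(0)
assert speaker_embeddings.shape == (1, 512)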
| 20
|
"""simple docstring"""
lowercase__ = 0 # The first color of the flag.
lowercase__ = 1 # The second color of the flag.
lowercase__ = 2 # The third color of the flag.
lowercase__ = (red, white, blue)
def __lowerCamelCase ( __UpperCamelCase ) -> list:
"""simple docstring"""
if not sequence:
return []
if len(__UpperCamelCase ) == 1:
return list(__UpperCamelCase )
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : List[Any] = len(__UpperCamelCase ) - 1
lowerCAmelCase_ : Union[str, Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
lowerCAmelCase_ , lowerCAmelCase_ = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowerCAmelCase_ , lowerCAmelCase_ = sequence[high], sequence[mid]
high -= 1
else:
lowerCAmelCase_ : str = f'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(__UpperCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = input("""Enter numbers separated by commas:\n""").strip()
lowercase__ = [int(item.strip()) for item in user_input.split(""",""")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 241
| 0
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of the current worldwide COVID-19 headline statistics."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=False)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`." )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, *args, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
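# Minimal usage sketch (illustrative, not part of this module): the pipeline is
# normally constructed via `transformers.pipeline`, which wires up the model and
# tokenizer; the checkpoint name below is just an example.
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=3, targets=["capital", "city"])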
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token's probability of being an entity start/end token."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
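# Reading of the scoring scheme above (a gloss, not original documentation): for each
# query i, the support-set embeddings at the entity start/end marker tokens act as
# keys; matmul followed by softmax turns query-token/support-token similarities into
# per-token probabilities of being an entity start or end.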
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
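# For example, should_ignore("encoder.proj.weight", IGNORE_KEYS_S2T) is True via the
# plain "encoder.proj" entry, while a trailing-wildcard entry such as
# "text_encoder_prenet.*" matches every key under that prefix.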
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
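# Example invocation (the script name and paths are placeholders, not from this file):
#
#   python convert_speecht5_checkpoint.py --task t2s --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model --pytorch_dump_folder_path ./speecht5_tts_hf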
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output
        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
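# Note: the patience branch above is inference-time early exit. Once the per-layer
# classifiers produce the same prediction for `patience` consecutive layers (or stay
# within `regression_threshold` for regression), the remaining layers are skipped;
# the stats counters record the average exit depth.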
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
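# Minimal inference sketch (illustrative assumptions: a fine-tuned checkpoint at
# `./pabee_out` and a pre-tokenized `batch`; neither comes from this file):
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("./pabee_out")
#   model.bert.set_patience(3)   # exit once 3 consecutive layer classifiers agree
#   model.bert.reset_stats()
#   logits = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])[0]
#   model.bert.log_stats()       # prints the measured average exit layer / speed-up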
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , a__ )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , a__ , a__ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(a__ )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
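# Example launch (illustrative values; the flags are the HfArgumentParser /
# TrainingArguments options declared above, and "swag" must be a key of `processors`):
#
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./swag --output_dir ./out --do_train --do_eval --max_seq_length 128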
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def generate_random_hand():
    """Generate a random hand/opponent pair together with the expected result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) maps to 0 (loss), 1 (tie) or 2 (win), because
    # SORTED_HANDS is ordered from weakest to strongest.
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate a stream of random hand pairs."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    """A single linked-list node holding one stack item."""

    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self):
        return f"{self.data}"


class Stack(Generic[T]):
    """A LIFO stack backed by a singly linked list."""

    def __init__(self):
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
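# Quick usage: s = Stack[int](); s.push(1); s.push(2) gives str(s) == "2->1";
# s.pop() returns 2 and s.peek() then returns 1 — LIFO order via the `next` chain.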
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_="train" )-> List[str]:
'''simple docstring'''
return calculate_hypothesis_value(lowerCAmelCase_ , lowerCAmelCase_ ) - output(
lowerCAmelCase_ , lowerCAmelCase_ )
def _hypothesis_value(data_input_tuple):
    '''simple docstring'''
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
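# Worked example (illustrative addition, not in the original file): with
# parameter_vector = [2, 4, 1, 5], the hypothesis for the first training
# input (5, 2, 3) is 2 + 4*5 + 1*2 + 5*3 = 39.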
def output(example_no, data_set):
    '''simple docstring'''
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    '''simple docstring'''
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    '''simple docstring'''
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    '''simple docstring'''
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    '''simple docstring'''
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector,
            atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j))
def test_gradient_descent():
    '''simple docstring'''
    for i in range(len(test_data)):
        print(("""Actual output value:""", output(i, """test""")))
        print(("""Hypothesis output:""", calculate_hypothesis_value(i, """test""")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 349
|
'''simple docstring'''
import math
def jump_search(arr, x) -> int:
    '''simple docstring'''
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
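# Worked example (illustrative addition): in the sorted array
# [0, 1, 2, 8, 13, 17, 19, 25, 31] the block size is sqrt(9) = 3, so
# jump_search(arr, 25) probes indices 2, 5 and 8, then scans linearly
# from index 6 and returns 7.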
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    x = int(input("""Enter the number to be searched:\n"""))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 349
| 1
|
def prime_sieve_eratosthenes( num : int ):
    """simple docstring"""
    if num <= 0:
        raise ValueError("Input must be a positive integer" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
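# Quick sanity check (illustrative addition): prime_sieve_eratosthenes(10)
# returns [2, 3, 5, 7].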
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 169
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
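# Minimal usage sketch (illustrative addition; assumes the Hub checkpoint is
# reachable):
#
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained(
#         "squeezebert/squeezebert-uncased")
#     encoded = tokenizer("first segment", "second segment")
#     # encoded["token_type_ids"] is 0 for the first segment (plus [CLS] and
#     # the first [SEP]) and 1 for the second, as built above.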
| 225
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
SPIECE_UNDERLINE = '▁'
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
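# Illustrative note (added): unlike BERT-style tokenizers, XLNet appends its
# special tokens, so build_inputs_with_special_tokens puts [SEP] and [CLS]
# at the end of the sequence rather than wrapping it.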
| 294
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        '''simple docstring'''
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_feature_extractor( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(audios=raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 294
| 1
|
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
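# Minimal usage sketch (illustrative addition; this function needs a very old
# TensorFlow runtime, since tf.Session and tf.placeholder are TF 1.x APIs and
# tf.sub predates TF 1.0):
#
#     vectors = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.8, 8.2]]
#     centroids, assignments = TFKMeansCluster(vectors, 2)
#     # assignments pairs each input vector with one of the 2 centroids.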
| 139
|
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def A_ ( snake_case , snake_case="train" ):
return calculate_hypothesis_value(snake_case , snake_case ) - output(
snake_case , snake_case )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Any = 0
for i in range(len(snake_case ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def A_ ( snake_case , snake_case ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def A_ ( snake_case , snake_case ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def A_ ( snake_case , snake_case=m ):
SCREAMING_SNAKE_CASE:Dict = 0
for i in range(snake_case ):
if index == -1:
summation_value += _error(snake_case )
else:
summation_value += _error(snake_case ) * train_data[i][0][index]
return summation_value
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:int = summation_of_cost_derivative(snake_case , snake_case ) / m
return cost_derivative_value
def A_ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
SCREAMING_SNAKE_CASE:List[str] = 0.00_0002
SCREAMING_SNAKE_CASE:Union[str, Any] = 0
SCREAMING_SNAKE_CASE:Union[str, Any] = 0
while True:
j += 1
SCREAMING_SNAKE_CASE:List[str] = [0, 0, 0, 0]
for i in range(0 , len(snake_case ) ):
SCREAMING_SNAKE_CASE:Union[str, Any] = get_cost_derivative(i - 1 )
SCREAMING_SNAKE_CASE:Union[str, Any] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
snake_case , snake_case , atol=snake_case , rtol=snake_case , ):
break
SCREAMING_SNAKE_CASE:List[str] = temp_parameter_vector
print(("Number of iterations:", j) )
def A_ ( ):
for i in range(len(snake_case ) ):
print(("Actual output value:", output(snake_case , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(snake_case , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 139
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
SAMPLE_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class AutoFeatureExtractorTest( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut( self ):
        config = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
        self.assertIsInstance(config , WavaVecaFeatureExtractor )
    def test_feature_extractor_from_local_directory_from_key( self ):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        self.assertIsInstance(config , WavaVecaFeatureExtractor )
    def test_feature_extractor_from_local_directory_from_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR ).to_dict()
            config_dict.pop('''feature_extractor_type''' )
            config = WavaVecaFeatureExtractor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            config = AutoFeatureExtractor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(config , WavaVecaFeatureExtractor )
    def test_feature_extractor_from_local_file( self ):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG )
        self.assertIsInstance(config , WavaVecaFeatureExtractor )
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
            config = AutoFeatureExtractor.from_pretrained('''bert-base''' )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            config = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
    def test_feature_extractor_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            config = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_from_pretrained_dynamic_feature_extractor( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=False )
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=True )
        self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir )
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
    def test_new_feature_extractor_registration( self ):
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoFeatureExtractor.register(WavaVecaConfig , WavaVecaFeatureExtractor )
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir )
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_feature_extractor , CustomFeatureExtractor )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict( self ):
        class NewFeatureExtractor( WavaVecaFeatureExtractor ):
            is_local = True
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=False )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=True )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(not hasattr(feature_extractor , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 359
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high):
    '''simple docstring'''
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr, low, mid, high):
    '''simple docstring'''
    left_sum, max_left = float('''-inf''' ), -1
    right_sum, max_right = float('''-inf''' ), -1
    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
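# Worked example (illustrative addition): for the classic array
# [-2, 1, -3, 4, -1, 2, 1, -5, 4], max_subarray(arr, 0, 8) returns
# (3, 6, 6), i.e. the slice [4, -1, 2, 1] with sum 6.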
def time_max_subarray(input_size):
    '''simple docstring'''
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes():
    '''simple docstring'''
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''' )
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '''\t\t''', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('''Number of Inputs''' )
    plt.ylabel('''Time taken in seconds''' )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 218
| 0
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'Salesforce/codegen-350M-mono': 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        if kwargs.pop("add_bos_token" , False ):
            model_id = kwargs.pop("name_or_path" , "" )
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
                f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly." )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def decode( self , token_ids : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , skip_special_tokens : bool = False , clean_up_tokenization_spaces : bool = None , truncate_before_pattern : Optional[List[str]] = None , **kwargs , ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern )
        return decoded_text
    def truncate( self , completion , truncate_before_pattern ):
        def find_re(pattern , string , start_pos ):
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print" , completion , re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def" , completion , re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(terminal , completion , start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
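# Illustrative usage note (added): `decode` can cut a sampled completion at
# the first match of any regex in `truncate_before_pattern`, e.g.
#
#     tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
#
# which stops generated code before a new top-level comment, docstring or
# blank-line run.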
| 107
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""sentence_order_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        """simple docstring"""
        model = AlbertModel.from_pretrained("""albert-base-v2""" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 88
| 0
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list ):
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in doc or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc, key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
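# Illustrative example (added; not part of the original script):
#
#     clean_doc_toc([
#         {"local": "b", "title": "Beta"},
#         {"local": "a", "title": "Overview"},
#         {"local": "b", "title": "Beta"},
#     ])
#     # -> [{"local": "a", "title": "Overview"}, {"local": "b", "title": "Beta"}]
#
# Duplicate "local" keys collapse to one entry and the overview page is
# floated to the front.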
def check_scheduler_doc(overwrite=False ):
    with open(PATH_TO_TOC, encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def check_pipeline_doc(overwrite=False ):
    with open(PATH_TO_TOC, encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 207
|
from functools import lru_cache
@lru_cache
def factorial(num : int ) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
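# Quick check (illustrative addition): factorial(5) returns 120; thanks to
# @lru_cache, repeated calls reuse previously computed results.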
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207
| 1
|
def split(string : str , separator : str = " " ) -> list:
    '''simple docstring'''
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
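# Quick check (illustrative addition): split("apple#banana#cherry", "#")
# returns ['apple', 'banana', 'cherry'].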
if __name__ == "__main__":
from doctest import testmod
testmod()
| 68
|
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version :
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__( self ):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str )
def __repr__( self : Optional[Any] ):
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
    @property
    def tuple( self ):
        return self.major, self.minor, self.patch
    def _validate_operand( self ,other ):
        if isinstance(other ,str ):
            return Version(other )
        elif isinstance(other ,Version ):
            return other
        raise TypeError(F'''{other} (type {type(other )}) cannot be compared to version.''' )
def __eq__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ):
try:
SCREAMING_SNAKE_CASE:List[str] = self._validate_operand(SCREAMING_SNAKE_CASE__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Tuple = self._validate_operand(SCREAMING_SNAKE_CASE__ )
return self.tuple < other.tuple
def __hash__( self : Union[str, Any] ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __UpperCamelCase ( cls : str ,SCREAMING_SNAKE_CASE__ : str ):
SCREAMING_SNAKE_CASE:str = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __UpperCamelCase ( self : Tuple ):
return self.version_str
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:int = _VERSION_REG.match(snake_case )
if not res:
raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(snake_case ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def A_ ( snake_case ):
return ".".join(str(snake_case ) for v in version_tuple )
| 139
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
UpperCAmelCase : str = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        '''simple docstring'''
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        '''simple docstring'''
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5

    def to_dict(self):
        '''simple docstring'''
        return asdict(self)
def get_default_vocab_list():
    """simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
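# A minimal usage sketch (hypothetical values, not part of the original
# module): constructing a folding-model config exercises the nested dataclass
# validation in TrunkConfig.__post_init__.
if __name__ == "__main__":
    cfg = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
    assert isinstance(cfg.esmfold_config.trunk, TrunkConfig)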
| 148
|
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        '''simple docstring'''
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float.")
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
'''simple docstring'''
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
    def num_rows(self) -> int:
'''simple docstring'''
return len(self.rows )
@property
    def num_columns(self) -> int:
'''simple docstring'''
return len(self.rows[0] )
@property
    def order(self) -> tuple[int, int]:
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
    def is_square(self) -> bool:
'''simple docstring'''
return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        '''simple docstring'''
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
'''simple docstring'''
return bool(self.determinant() )
    def get_minor(self, row, column) -> int:
        '''simple docstring'''
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self, row, column) -> int:
        '''simple docstring'''
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        '''simple docstring'''
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors(self) -> Matrix:
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        '''simple docstring'''
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self) -> Matrix:
        '''simple docstring'''
        determinant = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
'''simple docstring'''
return str(self.rows )
def __str__( self ) -> str:
'''simple docstring'''
if self.num_rows == 0:
return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(lowerCAmelCase__ ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row(self, row, position=None) -> None:
        '''simple docstring'''
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column, position=None) -> None:
        '''simple docstring'''
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other) -> bool:
        '''simple docstring'''
        if not isinstance(other, Matrix):
return NotImplemented
return self.rows == other.rows
    def __ne__(self, other) -> bool:
'''simple docstring'''
return not self == other
def __neg__( self ) -> Matrix:
'''simple docstring'''
return self * -1
    def __add__(self, other) -> Matrix:
'''simple docstring'''
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other) -> Matrix:
'''simple docstring'''
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other) -> Matrix:
        '''simple docstring'''
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
    def __pow__(self, other) -> Matrix:
        '''simple docstring'''
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product(cls, row, column) -> int:
        '''simple docstring'''
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
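# A minimal usage sketch (not part of the original module):
if __name__ == "__main__":
    m = Matrix([[1, 2], [3, 4]])
    print(m.determinant())  # -> -2
    print(m.order)          # -> (2, 2)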
| 148
| 1
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 158
|
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_blocks(tmpdir):
    locka = FileLock(str(tmpdir / """foo.lock""" ) )
    lockb = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout


def test_long_lock_filename_is_shortened(tmpdir):
    filename = """a""" * 1000 + """.lock"""
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
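# A minimal usage sketch (hypothetical lock path, not part of the original
# test module): acquire with a timeout, as exercised by the tests above.
if __name__ == "__main__":
    demo_lock = FileLock("demo.lock")
    with demo_lock.acquire(timeout=0.1):
        print("lock held:", demo_lock.is_locked)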
| 335
| 0
|
import datasets
snake_case = """\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"""
snake_case = """\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"""
snake_case = """\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"""
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
                } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions , references )}
| 371
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 319
| 0
|
'''simple docstring'''
def hubble_parameter(hubble_constant: float, radiation_density: float, matter_density: float, dark_energy: float, redshift: float, ) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 63
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__A : Dict = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
__A : Optional[int] = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
__A : Dict = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
                } ) , )

    def _compute(self, predictions: List[List[List[str]]], references: List[List[str]], min_len: int = 1, max_len: int = 4, ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 138
| 0
|
'''simple docstring'''
def apply_table(inp, table):
    res = """"""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = """"""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("""0b""" + data[0] + data[-1] , 2 )
    col = int("""0b""" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = """0""" * (2 - len(l)) + l  # noqa: E741
    r = """0""" * (2 - len(r)) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
_lowercase = input("""Enter 10 bit key: """)
_lowercase = input("""Enter 8 bit message: """)
_lowercase = [6, 3, 7, 4, 8, 5, 10, 9]
_lowercase = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_lowercase = [2, 4, 3, 1]
_lowercase = [2, 6, 3, 1, 4, 8, 5, 7]
_lowercase = [4, 1, 3, 5, 7, 2, 8, 6]
_lowercase = [4, 1, 2, 3, 2, 3, 4, 1]
_lowercase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_lowercase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_lowercase = apply_table(key, paa_table)
_lowercase = temp[:5]
_lowercase = temp[5:]
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = apply_table(left + right, pa_table)
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = apply_table(left + right, pa_table)
# encryption
_lowercase = apply_table(message, IP)
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = temp[4:] + temp[:4]
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
_lowercase = apply_table(CT, IP)
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = temp[4:] + temp[:4]
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 229
|
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase_ :
'''simple docstring'''
    def __init__(self, order: int) -> None:
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list, b_coeffs: list) -> None:
        """simple docstring"""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                F'Expected a_coeffs to have {self.order + 1} elements '
                F'for {self.order}-order filter, got {len(a_coeffs)}'
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                F'Expected b_coeffs to have {self.order + 1} elements '
                F'for {self.order}-order filter, got {len(b_coeffs)}'
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
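# A minimal usage sketch (hypothetical first-order coefficients, not part of
# the original module): feed samples through the filter one at a time.
if __name__ == "__main__":
    filt = UpperCAmelCase_(1)
    filt.set_coefficients([1.0, -0.5], [0.5, 0.5])
    print([round(filt.process(x), 3) for x in (1.0, 0.0, 0.0)])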
| 229
| 1
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["""lanczos"""]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    input_device = None
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
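# A minimal sanity sketch for `slerp` (synthetic inputs, not part of the
# original pipeline file): interpolating halfway between two unit vectors.
if __name__ == "__main__":
    _a, _b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
    print(slerp(0.5, _a, _b))  # ~[0.7071, 0.7071]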
class __snake_case ( DiffusionPipeline ):
def __init__( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case=None ,snake_case=None ,snake_case=None ,):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=snake_case ,text_encoder=snake_case ,clip_model=snake_case ,tokenizer=snake_case ,unet=snake_case ,scheduler=snake_case ,feature_extractor=snake_case ,coca_model=snake_case ,coca_tokenizer=snake_case ,coca_transform=snake_case ,)
lowercase : Optional[int] = (
feature_extractor.size
if isinstance(feature_extractor.size ,snake_case )
else feature_extractor.size["""shortest_edge"""]
)
lowercase : Dict = transforms.Normalize(mean=feature_extractor.image_mean ,std=feature_extractor.image_std )
set_requires_grad(self.text_encoder ,snake_case )
set_requires_grad(self.clip_model ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.enable_attention_slicing(snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
set_requires_grad(self.vae ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
set_requires_grad(self.vae ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
set_requires_grad(self.unet ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
set_requires_grad(self.unet ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = min(int(num_inference_steps * strength ) ,snake_case )
lowercase : List[Any] = max(num_inference_steps - init_timestep ,0 )
lowercase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case=None ):
'''simple docstring'''
if not isinstance(snake_case ,torch.Tensor ):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(snake_case )}" )
lowercase : List[str] = image.to(device=snake_case ,dtype=snake_case )
if isinstance(snake_case ,snake_case ):
lowercase : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case )
]
lowercase : Tuple = torch.cat(snake_case ,dim=0 )
else:
lowercase : List[str] = self.vae.encode(snake_case ).latent_dist.sample(snake_case )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase : Any = 0.18_215 * init_latents
lowercase : Dict = init_latents.repeat_interleave(snake_case ,dim=0 )
lowercase : List[str] = randn_tensor(init_latents.shape ,generator=snake_case ,device=snake_case ,dtype=snake_case )
# get latents
lowercase : Optional[int] = self.scheduler.add_noise(snake_case ,snake_case ,snake_case )
lowercase : Optional[Any] = init_latents
return latents
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Dict = self.coca_transform(snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowercase : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
lowercase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = self.feature_extractor.preprocess(snake_case )
lowercase : Optional[int] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
lowercase : List[Any] = self.clip_model.get_image_features(snake_case )
lowercase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=snake_case )
lowercase : Tuple = image_embeddings_clip.repeat_interleave(snake_case ,dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Optional[int] = latents.detach().requires_grad_()
lowercase : Optional[int] = self.scheduler.scale_model_input(snake_case ,snake_case )
# predict the noise residual
lowercase : Optional[int] = self.unet(snake_case ,snake_case ,encoder_hidden_states=snake_case ).sample
if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowercase : Optional[int] = self.scheduler.alphas_cumprod[timestep]
lowercase : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase : Tuple = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowercase : int = torch.sqrt(snake_case )
lowercase : Any = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler ,snake_case ):
lowercase : Dict = self.scheduler.sigmas[index]
lowercase : Tuple = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler )} not supported" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase : Optional[Any] = 1 / 0.18_215 * sample
lowercase : Union[str, Any] = self.vae.decode(snake_case ).sample
lowercase : str = (image / 2 + 0.5).clamp(0 ,1 )
lowercase : int = transforms.Resize(self.feature_extractor_size )(snake_case )
lowercase : Tuple = self.normalize(snake_case ).to(latents.dtype )
lowercase : Tuple = self.clip_model.get_image_features(snake_case )
lowercase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=snake_case )
lowercase : str = spherical_dist_loss(snake_case ,snake_case ).mean() * clip_guidance_scale
lowercase : List[Any] = -torch.autograd.grad(snake_case ,snake_case )[0]
if isinstance(self.scheduler ,snake_case ):
lowercase : Any = latents.detach() + grads * (sigma**2)
lowercase : Optional[Any] = noise_pred_original
else:
lowercase : int = noise_pred_original - torch.sqrt(snake_case ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self ,snake_case ,snake_case ,snake_case = None ,snake_case = None ,snake_case = 512 ,snake_case = 512 ,snake_case = 0.6 ,snake_case = 50 ,snake_case = 7.5 ,snake_case = 1 ,snake_case = 0.0 ,snake_case = 100 ,snake_case = None ,snake_case = "pil" ,snake_case = True ,snake_case = 0.8 ,snake_case = 0.1 ,snake_case = 0.1 ,):
'''simple docstring'''
if isinstance(snake_case ,snake_case ) and len(snake_case ) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(snake_case )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(snake_case ,torch.Generator ) and batch_size > 1:
lowercase : str = [generator] + [None] * (batch_size - 1)
lowercase : Union[str, Any] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
lowercase : int = """, """.join(snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(snake_case ):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
lowercase : Optional[int] = self.get_image_description(snake_case )
if style_prompt is None:
if len(snake_case ):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
lowercase : str = self.get_image_description(snake_case )
# get prompt text embeddings for content and style
lowercase : List[Any] = self.tokenizer(
snake_case ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=snake_case ,return_tensors="""pt""" ,)
lowercase : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase : Optional[int] = self.tokenizer(
snake_case ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=snake_case ,return_tensors="""pt""" ,)
lowercase : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase : Optional[Any] = slerp(snake_case ,snake_case ,snake_case )
# duplicate text embeddings for each generation per prompt
lowercase : str = text_embeddings.repeat_interleave(snake_case ,dim=0 )
# set timesteps
lowercase : str = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase : Tuple = {}
if accepts_offset:
lowercase : int = 1
self.scheduler.set_timesteps(snake_case ,**snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase , lowercase : Optional[int] = self.get_timesteps(snake_case ,snake_case ,self.device )
lowercase : Tuple = timesteps[:1].repeat(snake_case )
# Preprocess image
lowercase : str = preprocess(snake_case ,snake_case ,snake_case )
lowercase : int = self.prepare_latents(
snake_case ,snake_case ,snake_case ,text_embeddings.dtype ,self.device ,snake_case )
lowercase : List[Any] = preprocess(snake_case ,snake_case ,snake_case )
lowercase : Tuple = self.prepare_latents(
snake_case ,snake_case ,snake_case ,text_embeddings.dtype ,self.device ,snake_case )
lowercase : List[str] = slerp(snake_case ,snake_case ,snake_case )
if clip_guidance_scale > 0:
lowercase : Union[str, Any] = self.get_clip_image_embeddings(snake_case ,snake_case )
lowercase : Optional[int] = self.get_clip_image_embeddings(snake_case ,snake_case )
lowercase : Optional[int] = slerp(
snake_case ,snake_case ,snake_case )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase : List[str] = content_text_input.input_ids.shape[-1]
lowercase : Optional[Any] = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=snake_case ,return_tensors="""pt""" )
lowercase : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowercase : Any = uncond_embeddings.repeat_interleave(snake_case ,dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowercase : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowercase : str = torch.randn(snake_case ,generator=snake_case ,device="""cpu""" ,dtype=snake_case ).to(
self.device )
else:
lowercase : Optional[int] = torch.randn(snake_case ,generator=snake_case ,device=self.device ,dtype=snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowercase : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase : str = {}
if accepts_eta:
lowercase : str = eta
# check if the scheduler accepts generator
lowercase : Optional[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowercase : Tuple = generator
with self.progress_bar(total=snake_case ):
for i, t in enumerate(snake_case ):
# expand the latents if we are doing classifier free guidance
lowercase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase : Optional[int] = self.scheduler.scale_model_input(snake_case ,snake_case )
# predict the noise residual
lowercase : Dict = self.unet(snake_case ,snake_case ,encoder_hidden_states=snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowercase , lowercase : str = noise_pred.chunk(2 )
lowercase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowercase : int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowercase , lowercase : Union[str, Any] = self.cond_fn(
snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,)
# compute the previous noisy sample x_t -> x_t-1
lowercase : Any = self.scheduler.step(snake_case ,snake_case ,snake_case ,**snake_case ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase : Optional[Any] = 1 / 0.18_215 * latents
lowercase : Any = self.vae.decode(snake_case ).sample
lowercase : Optional[Any] = (image / 2 + 0.5).clamp(0 ,1 )
lowercase : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase : List[str] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=snake_case ,nsfw_content_detected=snake_case )
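# A hypothetical usage sketch for the images-mixing pipeline above (the model
# id and custom pipeline name are assumptions, not taken from this file):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#   )
#   out = pipe(style_image, content_image, num_inference_steps=50, clip_guidance_scale=100)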
| 20
|
"""simple docstring"""
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """simple docstring"""
    # wrap the angle into [0, 360)
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 102
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
UpperCAmelCase_ : str = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366
|
def remove_duplicates(key: str) -> str:
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """simple docstring"""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """simple docstring"""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
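
# Hand-checked example: with key "CIPHER", the plaintext letters A..F encode to
# C, I, P, H, E, R, so encipher("BAD", cipher_map) == "ICH" and decipher("ICH",
# cipher_map) recovers "BAD".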
| 198
| 0
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
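
# Maps parameter names in the original (nanoGPT-style) Bark checkpoints to the
# corresponding module names in the Hugging Face implementation.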
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
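
# Note: load_state_dict above uses strict=False because the `.attn.bias` buffers
# excluded from the extra/missing key checks are not required to match.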
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
UpperCAmelCase : Tuple = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 252
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
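
    # Note: Blenderbot, like RoBERTa, does not use token type ids; the all-zero
    # mask above exists only for API compatibility.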
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 4
| 0
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
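
    # The (1, 3, 1, 1) shapes let mean/std broadcast over (batch, channel, height, width) images.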
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 45
|
def twos_complement(number: int) -> str:
    """Return the two's complement of a negative integer as a prefixed binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 1
|
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert at the head in descending order so the list ends up ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
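
# Expected output for the test data above:
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10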
| 235
|
'''simple docstring'''

# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union

_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'''{other} (type {type(other)}) cannot be compared to version.''')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'''Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.''')
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 139
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
snake_case_ = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
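
# Example: downscale_height_and_width(512, 512) == (64, 64); sizes that are not
# multiples of 64 are rounded up, so a height of 500 also yields a 64-pixel latent dimension.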
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Image generation pipeline for Kandinsky 2.2 with ControlNet-style hint conditioning."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
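
    # `enable_sequential_cpu_offload` above trades speed for memory by paging
    # submodules in and out; `enable_model_cpu_offload` below keeps each whole
    # submodel on the GPU only while it is actually being used.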
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 5_12, width: int = 5_12, num_inference_steps: int = 1_00, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds, 'hint': hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 350
|
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class WavaVecaPhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        phonemes = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
        vocab_tokens = dict(zip(phonemes, range(len(phonemes))))

        self.special_tokens_map = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
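
        # The temporary vocab file now maps each phoneme to a contiguous integer id.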
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase__ ( self :int ) -> str:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
UpperCAmelCase = tokenizer('m xxx ɪ' , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
UpperCAmelCase = tokenizer('m aaa ɪ ccc' , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
UpperCAmelCase = tokenizer('maɪ c' , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [3, 2_00] ) # mai should be <unk> (=3)
def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def UpperCAmelCase__ ( self :Dict ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )
def UpperCAmelCase__ ( self :Optional[Any] ) -> Dict:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> str:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
UpperCAmelCase = tokenizer.decode(sample_ids[0] )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def UpperCAmelCase__ ( self :Any ) -> str:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(lowercase_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def UpperCAmelCase__ ( self :Any ) -> Any:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )
def UpperCAmelCase__ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
UpperCAmelCase = tokenizer.decode(sample_ids[0] )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
UpperCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase_ )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , filter_word_delimiter_token=lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def UpperCAmelCase__ ( self :int ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=lowercase_ )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='en-us' ).input_ids
UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(lowercase_ , lowercase_ )
UpperCAmelCase = tokenizer.decode(lowercase_ )
UpperCAmelCase = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(lowercase_ , 'ɛ l o h aʊ a ʁ j u' )
def UpperCAmelCase__ ( self :int ) -> List[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how Are you'
UpperCAmelCase = 'hello how are you'
UpperCAmelCase = tokenizer(lowercase_ ).input_ids
UpperCAmelCase = tokenizer(lowercase_ ).input_ids
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def UpperCAmelCase__ ( lowercase_ :List[str] , lowercase_ :List[str] ) -> List[str]:
UpperCAmelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase__ ( self :str ) -> Optional[int]:
UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
UpperCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
UpperCAmelCase = tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ , filter_word_delimiter_token=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[int]:
UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(lowercase_ :List[Any] , lowercase_ :str ):
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
self.assertTrue(isinstance(outputs_list[0] , lowercase_ ) )
# transform list to ModelOutput
UpperCAmelCase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(lowercase_ :Any , lowercase_ :str ):
if isinstance(lowercase_ , lowercase_ ):
[recursive_check(lowercase_ , lowercase_ ) for la, la in zip(lowercase_ , lowercase_ )]
self.assertEqual(lowercase_ , lowercase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , output_char_offsets=lowercase_ )
UpperCAmelCase = [tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ ) for ids in sample_ids]
check_list_tuples_equal(lowercase_ , lowercase_ )
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
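# Example invocation of the CLI exposed above via fire (file names are illustrative):
#
#     python rouge_cli.py predictions.txt references.txt --save_path=rouge.json
#
# Any extra keyword flags are forwarded unchanged to `calculate_rouge`.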
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark-backed datasets."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs
        )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"]
                )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"]
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"]
                )

        if working_fpath != fpath:
            for file in os.listdir(os.path.dirname(working_fpath)):
                dest = posixpath.join(os.path.dirname(fpath), os.path.basename(file))
                shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
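# Usage sketch: this builder backs `Dataset.from_spark` (available in datasets>=2.12;
# treat the exact entry point as an assumption here, not part of this file):
#
#     from pyspark.sql import SparkSession
#     from datasets import Dataset
#
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("a", 0), ("b", 1)], schema="text: string, label: long")
#     ds = Dataset.from_spark(df)   # materializes the DataFrame as Arrow shards on disk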
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
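# Usage sketch (illustrative values; LevitConfig/LevitModel ship with transformers):
#
#     from transformers import LevitConfig, LevitModel
#     config = LevitConfig(hidden_sizes=[128, 256, 384])
#     model = LevitModel(config)   # randomly initialized LeViT encoder
#     config.down_ops[0]           # ['Subsample', 16, 8, 4, 2, 2] with the defaults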
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # The Iris dataset is used to demonstrate the algorithm.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true"
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
from collections.abc import Callable
class Heap:
    """A generic Heap; acts as a min- or max-heap depending on the key function."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares the two items using the default ordering."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """Returns the index of the valid parent among the given index and both its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top item pair from the heap and removes it as well if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
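# Usage sketch for the Heap above (min-heap on the key by default):
#
#     h = Heap()
#     h.insert_item(5, 34)
#     h.insert_item(6, 31)
#     h.insert_item(7, 37)
#     h.get_top()           # [6, 31]
#     h.extract_top()       # [6, 31]; items 5 and 7 remain
#     h.update_item(5, 4)   # re-keys item 5; heap order is restored automatically
#     h.delete_item(7)
#
# Passing key=lambda x: -x at construction turns it into a max-heap.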
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
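# Worked example (values taken from the upstream doctests for this cipher;
# verify before relying on them):
#
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     hc.encrypt("testing hill cipher")     # 'WHXYJOLM9C6XT085LIUB'
#     hc.decrypt("WHXYJOLM9C6XT085LIUB")    # 'TESTINGHILLCIPHERR' (trailing R from padding)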
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]], do_resize: Optional[bool] = None, size_divisor: Optional[int] = None, resample=None, do_rescale: Optional[bool] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
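# Usage sketch (illustrative; the processor floors H/W to multiples of size_divisor):
#
#     import numpy as np
#     processor = GLPNImageProcessor(size_divisor=32)
#     batch = processor(images=[np.zeros((3, 65, 97), dtype=np.uint8)], return_tensors="np")
#     batch["pixel_values"][0].shape   # (3, 64, 96): 65 -> 64, 97 -> 96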
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
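# Example invocation (paths are illustrative):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path ./xmod.base/model.pt \
#         --pytorch_dump_folder_path ./xmod-base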
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
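# Example launch (script path and flags are illustrative):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The helper imports the training script as a module and spawns its `_mp_fn`
# once per TPU core via torch_xla's multiprocessing.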
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, "wb") as fp:
        fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the version string from the (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
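# Usage sketch:
#
#     v = Version("1.0.0")
#     v.tuple                 # (1, 0, 0)
#     v == "1.0.0"            # True: strings are coerced via _validate_operand
#     Version.from_dict({"version_str": "1.2.3", "extra": 0}).patch   # 3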
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test (function name restored from the __main__ call below)."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # keep d integral; n - 1 == d * (2 ** exp)
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
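# Quick sanity check (the test is probabilistic, but with prec=1000 a composite
# slipping through is vanishingly unlikely):
#
#     [i for i in range(2, 30) if is_prime_big(i)]
#     # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]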
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
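# Usage sketch:
#
#     extract_path_from_uri("s3://bucket/dir/train")    # 'bucket/dir/train'
#     import fsspec
#     is_remote_filesystem(fsspec.filesystem("file"))   # False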
"""simple docstring"""
def binary_multiply(a: int, b: int) -> int:
    """Shift-and-add (Russian peasant) multiplication; names are descriptive reconstructions."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Same shift-and-add product, reduced modulo `modulus` at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
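# Worked example: 13 * 11 with the shift-and-add loop above.
# 11 is 1011 in binary, so a is added in at bit positions 0, 1 and 3:
# 13 + 26 + 104 = 143.
if __name__ == "__main__":
    assert binary_multiply(13, 11) == 143
    assert binary_mod_multiply(13, 11, 7) == 143 % 7  # == 3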
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
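# These slow tests exercise optimum's BetterTransformer integration; a typical run
# (test file path and selector are illustrative):
#
#     RUN_SLOW=1 pytest -k "bettertransformer" tests/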
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE : Any = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
_SCREAMING_SNAKE_CASE : str = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True,
        unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
        mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents,
            **kwargs,
        )
        # `normalizers` comes from the `tokenizers` package, imported at the top of this file
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
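# A minimal usage sketch (the class above corresponds to transformers'
# BertTokenizerFast; the checkpoint name assumes Hub access):
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tokenizer("hello world")["input_ids"]                            # [CLS] hello world [SEP]
#   tokenizer.create_token_type_ids_from_sequences([7592], [2088])   # -> [0, 0, 0, 1, 1]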
| 314
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs) -> BertTokenizer:
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> BertTokenizerFast:
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs) -> EfficientNetImageProcessor:
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # one random uint8 CHW array, wrapped in a PIL image (HWC)
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ = processor(images=__lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = AlignProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''lower newer'''
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 314
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
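# Example: for number = 97, sqrt(97) ~ 9.8, so the loop only tries i = 5
# (testing the divisors 5 and 7); neither divides 97, hence 97 is prime.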
def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 99
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},)
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."},)
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."})
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments) -> None:
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],)
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },)
    validation_split_name: Optional[str] = field(
        default="validation", metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },)
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},)
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },)
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},)
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",)
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2,)
        return batch
class Wav2Vec2PreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
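    # Worked numbers for the schedule above: with max_gumbel_temp = 2.0 and
    # gumbel_temp_decay = 0.999995, after 100_000 update steps the temperature is
    # 2.0 * 0.999995**100_000 ~ 1.21, decaying geometrically toward min_gumbel_temp.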
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir,)
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir,)
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,)
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,)
    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names,)
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing,)
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")
    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay,)
    trainer.train()


if __name__ == "__main__":
    main()
| 99
| 1
|
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the integers base**power (base < max_base, power < max_power)
    whose number of digits equals the power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
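# Example: 16807 = 7**5 is a 5-digit fifth power, so it counts; 2**11 = 2048 has
# only 4 digits, so it does not. With the defaults this yields the classic
# Project Euler 63 answer, 49.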
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 172
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the multiplicative inverse of a 2x2 or 3x3 matrix."""
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0]))
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
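# Example: inverse_of_matrix([[2, 5], [2, 0]]) -> [[0.0, 0.5], [0.2, -0.2]],
# since the determinant is 2*0 - 2*5 = -10 and the inverse of [[a, b], [c, d]]
# is (1/det) * [[d, -b], [-c, a]].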
| 104
| 0
|
"""simple docstring"""
def find_min(arr: list[int]) -> int:
    """Split arr into two subsets whose sums are as close as possible and
    return the minimum difference between the two subset sums."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
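# Example: find_min([1, 6, 11, 5]) == 1, since the split {1, 5, 6} vs {11}
# gives subset sums 12 and 11.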
| 24
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192,
        d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16,
        num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5,
        router_bias=False, router_jitter_noise=0.0, router_dtype="float32",
        router_ignore_padding_tokens=False, output_hidden_states=False,
        output_attentions=False, initializer_factor=0.002, output_router_logits=False,
        use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs,)
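# A minimal usage sketch (the overrides are illustrative):
#
#   config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=4)
#   config.num_layers   # -> 4, i.e. num_switch_layers + num_ext_layers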
| 24
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : Any = {'vocab_file': 'spiece.model'}
snake_case_ : List[str] = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
snake_case_ : str = {'bert_for_seq_generation': 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>",
        pad_token="<pad>", sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            pad_token=pad_token, sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
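# A minimal usage sketch (assumes the SentencePiece checkpoint can be downloaded):
#
#   tokenizer = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder")
#   tokenizer.tokenize("Hello world")   # SentencePiece pieces, e.g. ['▁Hello', '▁world']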
| 83
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 327
| 0
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
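# Worked numbers for the acceptance rule p = e^(change / current_temp): a
# worsening move with change = -2 is accepted with p ~ 0.98 at temp = 100 but
# only p ~ 0.14 at temp = 1, which is how the search cools into a plain hill climb.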
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 105
|
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(
    graph: list[dict[str, int]], distance: list[float], edge_count: int
) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
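# Bellman-Ford relaxes every edge vertex_count - 1 times, so the runtime is
# O(V * E); the extra pass in check_negative_cycle flags any cycle whose total
# weight is negative. Example: with edges (0->1, w=2) and (1->2, w=-1), the
# distances from source 0 are [0, 2, 1].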
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 105
| 1
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__lowercase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Pipeline that answers open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # the inputs are already a dict (or list of dicts) with "image" and "question" keys
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
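# A minimal usage sketch (any visual-question-answering checkpoint on the Hub
# works; the image path is illustrative):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="path/to/photo.png", question="How many cats are there?", top_k=2)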
| 272
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current, and power (pass the unknown as 0),
    solve P = V * I for the missing quantity."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 159
| 0
|
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data=None) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given list and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Recursively print the linked list's data in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 83
|
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
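# A minimal usage sketch:
#   ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
#   ps.get_sum(1, 3)               # -> 9 (2 + 3 + 4)
#   ps.contains_sum(6)             # -> True (1 + 2 + 3)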
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83
| 1
|
import string
import numpy
def a ( snake_case__: int , snake_case__: int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , snake_case__ )
class lowercase__:
"""simple docstring"""
a :Tuple = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
a :str = numpy.vectorize(lambda UpperCAmelCase : x % 36 )
a :int = numpy.vectorize(UpperCAmelCase )
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : numpy.ndarray ) -> None:
lowercase_ = self.modulus(SCREAMING_SNAKE_CASE_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowercase_ = encrypt_key.shape[0]
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : str ) -> int:
return self.key_string.index(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> str:
return self.key_string[round(SCREAMING_SNAKE_CASE_ )]
def _lowercase ( self : Optional[Any] ) -> None:
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = len(self.key_string )
if greatest_common_divisor(SCREAMING_SNAKE_CASE_ , len(self.key_string ) ) != 1:
lowercase_ = (
f'''determinant modular {req_l} of encryption key({det}) '''
f'''is not co prime w.r.t {req_l}.\nTry another key.'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> str:
lowercase_ = [char for char in text.upper() if char in self.key_string]
lowercase_ = chars[-1]
while len(SCREAMING_SNAKE_CASE_ ) % self.break_key != 0:
chars.append(SCREAMING_SNAKE_CASE_ )
return "".join(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> str:
lowercase_ = self.process_text(text.upper() )
lowercase_ = ''''''
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(SCREAMING_SNAKE_CASE_ ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(self.encrypt_key.dot(SCREAMING_SNAKE_CASE_ ) ).T.tolist()[
0
]
lowercase_ = ''''''.join(
self.replace_digits(SCREAMING_SNAKE_CASE_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def _lowercase ( self : Union[str, Any] ) -> numpy.ndarray:
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowercase_ = i
break
lowercase_ = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> str:
lowercase_ = self.make_decrypt_key()
lowercase_ = self.process_text(text.upper() )
lowercase_ = ''''''
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(SCREAMING_SNAKE_CASE_ ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(decrypt_key.dot(SCREAMING_SNAKE_CASE_ ) ).T.tolist()[0]
lowercase_ = ''''''.join(
self.replace_digits(SCREAMING_SNAKE_CASE_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def a ( ):
'''simple docstring'''
lowercase_ = int(input('''Enter the order of the encryption key: ''' ) )
lowercase_ = []
print('''Enter each row of the encryption key with space separated integers''' )
for _ in range(snake_case__ ):
lowercase_ = [int(snake_case__ ) for x in input().split()]
hill_matrix.append(snake_case__ )
lowercase_ = HillCipher(numpy.array(snake_case__ ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
lowercase_ = input('''\n1. Encrypt\n2. Decrypt\n''' )
if option == "1":
lowercase_ = input('''What text would you like to encrypt?: ''' )
print('''Your encrypted text is:''' )
print(hc.encrypt(snake_case__ ) )
elif option == "2":
lowercase_ = input('''What text would you like to decrypt?: ''' )
print('''Your decrypted text is:''' )
print(hc.decrypt(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 30
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__A : Tuple = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__A : Tuple = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Dict = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ), dtype=_UpperCAmelCase )[0]
@deprecated(_UpperCAmelCase, 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
print('Extracting', f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
lowerCAmelCase : List[str] = _readaa(_UpperCAmelCase )
if magic != 2_051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
lowerCAmelCase : Optional[Any] = _readaa(_UpperCAmelCase )
lowerCAmelCase : Any = _readaa(_UpperCAmelCase )
lowerCAmelCase : List[Any] = _readaa(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = bytestream.read(rows * cols * num_images )
lowerCAmelCase : Any = numpy.frombuffer(_UpperCAmelCase, dtype=numpy.uinta )
lowerCAmelCase : Optional[int] = data.reshape(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, 1 )
return data
@deprecated(_UpperCAmelCase, 'Please use tf.one_hot on tensors.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = labels_dense.shape[0]
lowerCAmelCase : Union[str, Any] = numpy.arange(_UpperCAmelCase ) * num_classes
lowerCAmelCase : List[str] = numpy.zeros((num_labels, num_classes) )
lowerCAmelCase : List[str] = 1
return labels_one_hot
@deprecated(_UpperCAmelCase, 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=10 ) -> List[str]:
'''simple docstring'''
print('Extracting', f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
lowerCAmelCase : List[str] = _readaa(_UpperCAmelCase )
if magic != 2_049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
lowerCAmelCase : Optional[Any] = _readaa(_UpperCAmelCase )
lowerCAmelCase : Dict = bytestream.read(_UpperCAmelCase )
lowerCAmelCase : Dict = numpy.frombuffer(_UpperCAmelCase, dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_UpperCAmelCase, _UpperCAmelCase )
return labels
class __A :
@deprecated(
UpperCAmelCase_ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=dtypes.floataa , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[Any]=None , ):
lowerCAmelCase , lowerCAmelCase : int = random_seed.get_seed(UpperCAmelCase_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCAmelCase : List[str] = dtypes.as_dtype(UpperCAmelCase_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
lowerCAmelCase : Dict = 10000
lowerCAmelCase : Any = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"images.shape: {images.shape} labels.shape: {labels.shape}"
lowerCAmelCase : Optional[Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCAmelCase : Union[str, Any] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCAmelCase : Optional[int] = images.astype(numpy.floataa )
lowerCAmelCase : Dict = numpy.multiply(UpperCAmelCase_ , 1.0 / 2_55.0 )
lowerCAmelCase : List[str] = images
lowerCAmelCase : List[str] = labels
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Optional[int] = 0
@property
def lowercase__ ( self : str ):
return self._images
@property
def lowercase__ ( self : Dict ):
return self._labels
@property
def lowercase__ ( self : List[Any] ):
return self._num_examples
@property
def lowercase__ ( self : Any ):
return self._epochs_completed
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=True ):
if fake_data:
lowerCAmelCase : Union[str, Any] = [1] * 784
lowerCAmelCase : Dict = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(UpperCAmelCase_ )],
[fake_label for _ in range(UpperCAmelCase_ )],
)
lowerCAmelCase : Union[str, Any] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCAmelCase : Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = self.images[perma]
lowerCAmelCase : str = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCAmelCase : Tuple = self._num_examples - start
lowerCAmelCase : Union[str, Any] = self._images[start : self._num_examples]
lowerCAmelCase : Tuple = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCAmelCase : Dict = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = self.images[perm]
lowerCAmelCase : Optional[Any] = self.labels[perm]
# Start next epoch
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Dict = batch_size - rest_num_examples
lowerCAmelCase : int = self._index_in_epoch
lowerCAmelCase : Union[str, Any] = self._images[start:end]
lowerCAmelCase : int = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCAmelCase : Optional[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_UpperCAmelCase, 'Please write your own downloading logic.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
if not gfile.Exists(_UpperCAmelCase ):
gfile.MakeDirs(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
if not gfile.Exists(_UpperCAmelCase ):
urllib.request.urlretrieve(_UpperCAmelCase, _UpperCAmelCase ) # noqa: S310
with gfile.GFile(_UpperCAmelCase ) as f:
lowerCAmelCase : List[Any] = f.size()
print('Successfully downloaded', _UpperCAmelCase, _UpperCAmelCase, 'bytes.' )
return filepath
@deprecated(
_UpperCAmelCase, 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=dtypes.floataa, _UpperCAmelCase=True, _UpperCAmelCase=5_000, _UpperCAmelCase=None, _UpperCAmelCase=DEFAULT_SOURCE_URL, ) -> Tuple:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[], [], fake_data=_UpperCAmelCase, one_hot=_UpperCAmelCase, dtype=_UpperCAmelCase, seed=_UpperCAmelCase )
lowerCAmelCase : Tuple = fake()
lowerCAmelCase : Optional[Any] = fake()
lowerCAmelCase : List[Any] = fake()
return _Datasets(train=_UpperCAmelCase, validation=_UpperCAmelCase, test=_UpperCAmelCase )
if not source_url: # empty string check
lowerCAmelCase : Any = DEFAULT_SOURCE_URL
lowerCAmelCase : Optional[Any] = 'train-images-idx3-ubyte.gz'
lowerCAmelCase : Any = 'train-labels-idx1-ubyte.gz'
lowerCAmelCase : int = 't10k-images-idx3-ubyte.gz'
lowerCAmelCase : Union[str, Any] = 't10k-labels-idx1-ubyte.gz'
lowerCAmelCase : str = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + train_images_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : Any = _extract_images(_UpperCAmelCase )
lowerCAmelCase : Tuple = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + train_labels_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : int = _extract_labels(_UpperCAmelCase, one_hot=_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + test_images_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : List[Any] = _extract_images(_UpperCAmelCase )
lowerCAmelCase : Any = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + test_labels_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : List[str] = _extract_labels(_UpperCAmelCase, one_hot=_UpperCAmelCase )
if not 0 <= validation_size <= len(_UpperCAmelCase ):
lowerCAmelCase : str = (
'Validation size should be between 0 and '
f"{len(_UpperCAmelCase )}. Received: {validation_size}."
)
raise ValueError(_UpperCAmelCase )
lowerCAmelCase : str = train_images[:validation_size]
lowerCAmelCase : Dict = train_labels[:validation_size]
lowerCAmelCase : List[str] = train_images[validation_size:]
lowerCAmelCase : str = train_labels[validation_size:]
lowerCAmelCase : str = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
lowerCAmelCase : int = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
return _Datasets(train=_UpperCAmelCase, validation=_UpperCAmelCase, test=_UpperCAmelCase )
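# Hedged usage sketch (added): in the original, now-deprecated TF 1.x module this
# loader is exposed as `read_data_sets`. The call below is illustrative and
# requires network access to fetch the MNIST archives.
#
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(64)
#   print(images.shape, labels.shape)  # expected: (64, 784) (64, 10)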
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase_ = 16
lowerCamelCase_ = 32
def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : Dict = 16 , __A : Optional[Any] = "bert-base-cased" ) -> Any:
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" )
def tokenize_function(__A : str ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_SCREAMING_SNAKE_CASE = datasets.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=SCREAMING_SNAKE_CASE__ )
    # We also rename the 'label' column to 'labels', which is the column name the models of the
    # transformers library expect
_SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__A : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : Dict ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE = config['''lr''']
_SCREAMING_SNAKE_CASE = int(config["num_epochs"] )
_SCREAMING_SNAKE_CASE = int(config["seed"] )
_SCREAMING_SNAKE_CASE = int(config["batch_size"] )
_SCREAMING_SNAKE_CASE = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_SCREAMING_SNAKE_CASE = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ )
if accelerator.state.deepspeed_plugin is not None:
_SCREAMING_SNAKE_CASE = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE__ , )
else:
_SCREAMING_SNAKE_CASE = DummyScheduler(SCREAMING_SNAKE_CASE__ , total_num_steps=SCREAMING_SNAKE_CASE__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# We need to keep track of how many total steps we have iterated over
_SCREAMING_SNAKE_CASE = 0
    # We also need to keep track of the starting epoch so files are named properly
_SCREAMING_SNAKE_CASE = 0
# Now we train the model
_SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = {}
for epoch in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = outputs.loss
_SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_SCREAMING_SNAKE_CASE = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_SCREAMING_SNAKE_CASE = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
_SCREAMING_SNAKE_CASE = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_SCREAMING_SNAKE_CASE = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , )
_SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
_SCREAMING_SNAKE_CASE = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of a training script tracking model performance." )
parser.add_argument(
"--model_name_or_path" , type=SCREAMING_SNAKE_CASE__ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=SCREAMING_SNAKE_CASE__ , )
parser.add_argument(
"--output_dir" , type=SCREAMING_SNAKE_CASE__ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="Number of train epochs." , )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
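# Hedged usage sketch (added): this script is meant to be launched through
# Accelerate, typically with a DeepSpeed config file. The config path below is
# illustrative; the flags match the argparse definitions above.
#
#   accelerate launch --config_file ds_zero2.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .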
'''simple docstring'''
import torch
from transformers import AutoModel
class lowercase_ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Union[str, Any]="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__lowerCamelCase , self ).__init__()
_SCREAMING_SNAKE_CASE = AutoModel.from_pretrained(__lowerCamelCase , return_dict=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.nn.CosineSimilarity(3 , 1e-08 )
_SCREAMING_SNAKE_CASE = torch.nn.Softmax(dim=1 )
def lowerCAmelCase_ ( self : Dict , **__lowerCamelCase : Any ):
"""simple docstring"""
return self.bert(**__lowerCamelCase ).last_hidden_state
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCamelCase : List[str] ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__lowerCamelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__lowerCamelCase , __lowerCamelCase ) )
def lowerCAmelCase_ ( self : int , __lowerCamelCase : str , __lowerCamelCase : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = W_supports["sizes"].tolist()
_SCREAMING_SNAKE_CASE = W_supports["start_token_id"].item()
_SCREAMING_SNAKE_CASE = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_SCREAMING_SNAKE_CASE = self.BERT(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.BERT(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = W_supports["input_ids"] == start_token_id
_SCREAMING_SNAKE_CASE = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__lowerCamelCase ):
if i == 0:
_SCREAMING_SNAKE_CASE = 0
else:
_SCREAMING_SNAKE_CASE = support_sizes[i - 1]
_SCREAMING_SNAKE_CASE = S[s : s + size][start_token_masks[s : s + size]]
_SCREAMING_SNAKE_CASE = S[s : s + size][end_token_masks[s : s + size]]
_SCREAMING_SNAKE_CASE = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_SCREAMING_SNAKE_CASE = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_SCREAMING_SNAKE_CASE = torch.vstack((p_starts, p_start) )
_SCREAMING_SNAKE_CASE = torch.vstack((p_ends, p_end) )
else:
_SCREAMING_SNAKE_CASE = p_start
_SCREAMING_SNAKE_CASE = p_end
return p_starts, p_ends
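# Added illustration (not part of the original class): a runnable sketch of the
# temperature-scaled cosine-similarity + softmax step used above. Shapes and
# names are illustrative.
def _similarity_sketch() -> None:
    cos = torch.nn.CosineSimilarity(3, 1e-08)
    softmax = torch.nn.Softmax(dim=1)
    queries = torch.randn(2, 5, 10, 768)   # (batch, candidates, tokens, hidden)
    supports = torch.randn(2, 5, 10, 768)
    probs = softmax(1.0 * cos(queries, supports))  # temperature T = 1
    assert probs.shape == (2, 5, 10)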
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Optional[int] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = GPTSwaTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = False
def _snake_case ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase : Dict = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = """This is a test"""
_UpperCAmelCase : List[Any] = """This is a test"""
return input_text, output_text
def _snake_case ( self ) -> int:
_UpperCAmelCase : str = """<s>"""
_UpperCAmelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(_lowerCAmelCase ) ,2_000 )
def _snake_case ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size ,2_000 )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Union[str, Any] = GPTSwaTokenizer(_lowerCAmelCase )
_UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[465, 287, 265, 631, 842] )
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
# fmt: on
_UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase ,[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] ,)
_UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
# fmt: off
self.assertListEqual(
_lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def _snake_case ( self ) -> Any:
_UpperCAmelCase : int = GPTSwaTokenizer(_lowerCAmelCase )
_UpperCAmelCase : int = ["""This is a test""", """I was born in 92000, and this is falsé."""]
_UpperCAmelCase : Any = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase )
@slow
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
_UpperCAmelCase : Union[str, Any] = {"""input_ids""": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
return np.maximum(0 , UpperCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
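# Added companion sketch (not in the original): the ReLU subgradient, taking the
# value 0 at the kink, vectorised the same way as the function above.
def relu_derivative(vector: list[float]) -> np.ndarray:
    """Elementwise indicator of where ReLU is strictly increasing."""
    return (np.array(vector) > 0).astype(float)

# print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]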
import os
from pathlib import Path
def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCamelCase__ : int ={
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
lowerCamelCase__ : Tuple =f'''{src_lang}-{tgt_lang}'''
lowerCamelCase__ : str =f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original model (and this ported version) doesn\'t seem to handle inputs with repeated sub-phrases well: [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the one reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
Note: fairseq reports using a beam of 50, so you should get a slightly higher score if you re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
lowerCamelCase__ : List[str] =os.path.join(__lowerCamelCase , '''README.md''' )
print(f'''Generating {path}''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(__lowerCamelCase )
# make sure we are under the root of the project
_lowercase : List[str] = Path(__file__).resolve().parent.parent.parent
_lowercase : Any = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_lowercase : List[str] = model_name.split("-")
_lowercase : int = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Any ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCamelCase__ : Optional[Any] ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
lowerCamelCase__ : Any =f'''{src_lang}-{tgt_lang}'''
lowerCamelCase__ : Any =f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the one reported in the paper, as the researchers don\'t use `sacrebleu` and instead measure the score on tokenized outputs. The `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=__lowerCamelCase , exist_ok=__lowerCamelCase )
lowerCamelCase__ : str =os.path.join(__lowerCamelCase , '''README.md''' )
print(f'''Generating {path}''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(__lowerCamelCase )
# make sure we are under the root of the project
_lowercase : List[str] = Path(__file__).resolve().parent.parent.parent
_lowercase : Dict = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_lowercase : int = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""DeiTFeatureExtractor"""]
a_ = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
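# Hedged usage note (added): with the `_LazyModule` indirection above, the heavy
# framework submodules are imported only on first attribute access, e.g.
#
#   from transformers.models.deit import DeiTConfig  # triggers the real import
#   config = DeiTConfig(image_size=224)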
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = 42
# setable values
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = None
@classmethod
def UpperCamelCase__( cls , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
return cls(common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase )
@dataclass
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
_lowerCamelCase = 42
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self , __lowerCamelCase = 1000 , __lowerCamelCase = 0.0_0_0_1 , __lowerCamelCase = 0.0_2 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "fixed_small" , __lowerCamelCase = True , __lowerCamelCase = "epsilon" , __lowerCamelCase = jnp.floataa , ):
'''simple docstring'''
__A : Tuple = dtype
def UpperCamelCase__( self , __lowerCamelCase = None ):
'''simple docstring'''
if common is None:
__A : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__A : Tuple = jnp.array(1.0 , dtype=self.dtype )
__A : Optional[int] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
return sample
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = () ):
'''simple docstring'''
__A : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__A : Optional[Any] = (jnp.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ):
'''simple docstring'''
__A : int = state.common.alphas_cumprod[t]
__A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__A : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__A : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__A : List[Any] = jnp.clip(__lowerCamelCase , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__A : Optional[Any] = jnp.log(jnp.clip(__lowerCamelCase , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
__A : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__A : Union[str, Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__A : Optional[Any] = variance
__A : Optional[Any] = state.common.betas[t]
__A : Any = (predicted_variance + 1) / 2
__A : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True , ):
'''simple docstring'''
__A : Optional[int] = timestep
if key is None:
__A : List[Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__A , __A : Tuple = jnp.split(__lowerCamelCase , sample.shape[1] , axis=1 )
else:
__A : List[str] = None
# 1. compute alphas, betas
__A : Dict = state.common.alphas_cumprod[t]
__A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__A : Tuple = 1 - alpha_prod_t
__A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__A : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__A : Any = model_output
elif self.config.prediction_type == "v_prediction":
__A : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__A : str = jnp.clip(__lowerCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : Optional[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__A : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__A : List[Any] = jax.random.split(__lowerCamelCase , num=1 )
__A : List[str] = jax.random.normal(__lowerCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__lowerCamelCase , __lowerCamelCase , predicted_variance=__lowerCamelCase ) ** 0.5) * noise
__A : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__A : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCamelCase , state=__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
return add_noise_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
return get_velocity_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
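# Hedged usage sketch (added): as the `FlaxDDPMSchedulerOutput` return type above
# suggests, this is the Flax DDPM scheduler, whose state is threaded explicitly
# through every call. `unet_apply` and `sample` are stand-ins for a real model
# and its noisy input.
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       model_output = unet_apply(sample, t)
#       sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)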
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowercase_ = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
def __UpperCAmelCase ( self ):
__a , __a , __a = _str_to_version_tuple(self.version_str )
def __repr__( self ):
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def __UpperCAmelCase ( self ):
return self.major, self.minor, self.patch
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , _a ):
return Version(_a )
elif isinstance(_a , _a ):
return other
raise TypeError(f'''{other} (type {type(_a )}) cannot be compared to version.''' )
def __eq__( self , _a ):
try:
__a = self._validate_operand(_a )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , _a ):
__a = self._validate_operand(_a )
return self.tuple < other.tuple
def __hash__( self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __UpperCAmelCase ( cls , _a ):
__a = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __UpperCAmelCase ( self ):
return self.version_str
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> Tuple:
__a = _VERSION_REG.match(lowerCAmelCase__ )
if not res:
raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(lowerCAmelCase__ ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def lowercase ( lowerCAmelCase__ : str ) -> Optional[Any]:
return ".".join(str(lowerCAmelCase__ ) for v in version_tuple )
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]:
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
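# Hedged usage sketch (added): the class above matches the LDM super-resolution
# pipeline from diffusers; the checkpoint id and input image below are
# illustrative, and the call mirrors the `__call__` signature defined above.
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("input.png").convert("RGB").resize((128, 128))
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]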
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a_ = get_logger(__name__)
class _UpperCamelCase ( enum.Enum ):
'''simple docstring'''
lowerCamelCase__ ='all_checks'
lowerCamelCase__ ='basic_checks'
lowerCamelCase__ ='no_checks'
class _UpperCamelCase ( __A ):
'''simple docstring'''
class _UpperCamelCase ( __A ):
'''simple docstring'''
class _UpperCamelCase ( __A ):
'''simple docstring'''
class _UpperCamelCase ( __A ):
'''simple docstring'''
def lowerCamelCase__ ( _a , _a , _a=None):
if expected_checksums is None:
logger.info("Unable to verify checksums.")
return
if len(set(_a) - set(_a)) > 0:
raise ExpectedMoreDownloadedFiles(str(set(_a) - set(_a)))
if len(set(_a) - set(_a)) > 0:
raise UnexpectedDownloadedFile(str(set(_a) - set(_a)))
SCREAMING_SNAKE_CASE : str = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
SCREAMING_SNAKE_CASE : Tuple = " for " + verification_name if verification_name is not None else ""
if len(_a) > 0:
raise NonMatchingChecksumError(
f"Checksums didn't match{for_verification_name}:\n"
f"{bad_urls}\n"
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
logger.info("All the checksums matched successfully" + for_verification_name)
class _UpperCamelCase ( __A ):
'''simple docstring'''
class _UpperCamelCase ( __A ):
'''simple docstring'''
class _UpperCamelCase ( __A ):
'''simple docstring'''
class _UpperCamelCase ( __A ):
'''simple docstring'''
def lowerCamelCase__ ( _a , _a):
if expected_splits is None:
logger.info("Unable to verify splits sizes.")
return
if len(set(_a) - set(_a)) > 0:
raise ExpectedMoreSplits(str(set(_a) - set(_a)))
if len(set(_a) - set(_a)) > 0:
raise UnexpectedSplits(str(set(_a) - set(_a)))
SCREAMING_SNAKE_CASE : List[str] = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(_a) > 0:
raise NonMatchingSplitsSizesError(str(_a))
logger.info("All the splits matched successfully.")
def lowerCamelCase__ ( _a , _a = True):
if record_checksum:
SCREAMING_SNAKE_CASE : List[str] = shaaaa()
with open(_a , "rb") as f:
for chunk in iter(lambda: f.read(1 << 20) , b""):
m.update(_a)
SCREAMING_SNAKE_CASE : Optional[int] = m.hexdigest()
else:
SCREAMING_SNAKE_CASE : List[str] = None
return {"num_bytes": os.path.getsize(_a), "checksum": checksum}
def lowerCamelCase__ ( _a):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
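# Hedged usage sketch (added): in the original module these helpers are
# `get_size_checksum_dict` and `verify_checksums`; the file path is illustrative.
#
#   record = get_size_checksum_dict("data/file.txt", record_checksum=True)
#   # record -> {"num_bytes": <file size>, "checksum": "<sha256 hexdigest>"}
#   verify_checksums({"data/file.txt": record}, {"data/file.txt": record})
#   # logs: "All the checksums matched successfully"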
"""simple docstring"""
def _snake_case ( _snake_case : str , _snake_case : str ):
lowerCAmelCase : Optional[int] = len(_snake_case )
lowerCAmelCase : List[Any] = []
for i in range(len(_snake_case ) - pat_len + 1 ):
lowerCAmelCase : Union[str, Any] = True
for j in range(_snake_case ):
if s[i + j] != pattern[j]:
lowerCAmelCase : str = False
break
if match_found:
position.append(_snake_case )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
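# Added note: the scan above is O(len(s) * len(pattern)) in the worst case. A
# hedged comparison sketch using str.find, which yields the same (possibly
# overlapping) match positions:
def find_all(s: str, pattern: str) -> list:
    positions, i = [], s.find(pattern)
    while i != -1:
        positions.append(i)
        i = s.find(pattern, i + 1)
    return positions

if __name__ == "__main__":
    assert find_all('''ABAAABCDBBABCDDEBCABC''', '''ABC''') == [4, 10, 18]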
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class snake_case_( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCamelCase_ : float , UpperCamelCase_ : Callable , UpperCamelCase_ : int , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : str = None , ):
super().__init__()
lowerCAmelCase : Dict = initial_learning_rate
lowerCAmelCase : List[str] = warmup_steps
lowerCAmelCase : Union[str, Any] = power
lowerCAmelCase : Dict = decay_schedule_fn
lowerCAmelCase : str = name
def __call__( self : Dict , UpperCamelCase_ : Optional[Any] ):
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowerCAmelCase : Dict = tf.cast(UpperCamelCase_ , tf.floataa )
lowerCAmelCase : List[Any] = tf.cast(self.warmup_steps , tf.floataa )
lowerCAmelCase : str = global_step_float / warmup_steps_float
lowerCAmelCase : Any = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , )
def lowerCamelCase__ ( self : str ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _snake_case ( _snake_case : float , _snake_case : int , _snake_case : int , _snake_case : float = 0.0 , _snake_case : float = 0.9 , _snake_case : float = 0.999 , _snake_case : float = 1E-8 , _snake_case : Optional[float] = None , _snake_case : Optional[float] = None , _snake_case : float = 0.0 , _snake_case : float = 1.0 , _snake_case : Optional[List[str]] = None , ):
lowerCAmelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_snake_case , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_snake_case , )
if num_warmup_steps:
lowerCAmelCase : List[str] = WarmUp(
initial_learning_rate=_snake_case , decay_schedule_fn=_snake_case , warmup_steps=_snake_case , )
if weight_decay_rate > 0.0:
lowerCAmelCase : Dict = AdamWeightDecay(
learning_rate=_snake_case , weight_decay_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=_snake_case , )
else:
lowerCAmelCase : Any = tf.keras.optimizers.Adam(
learning_rate=_snake_case , beta_a=_snake_case , beta_a=_snake_case , epsilon=_snake_case , clipnorm=_snake_case , global_clipnorm=_snake_case , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
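# Hedged usage sketch (added): in transformers this factory is `create_optimizer`;
# the numbers below are illustrative and `model` is an assumed tf.keras.Model.
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=3e-5,
#       num_train_steps=10_000,
#       num_warmup_steps=500,
#       weight_decay_rate=0.01,
#   )
#   model.compile(optimizer=optimizer)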
class snake_case_( a__ ):
def __init__( self : Optional[int] , UpperCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase_ : float = 0.9 , UpperCamelCase_ : float = 0.999 , UpperCamelCase_ : float = 1E-7 , UpperCamelCase_ : bool = False , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "AdamWeightDecay" , **UpperCamelCase_ : List[Any] , ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = weight_decay_rate
lowerCAmelCase : List[str] = include_in_weight_decay
lowerCAmelCase : Union[str, Any] = exclude_from_weight_decay
@classmethod
def lowerCamelCase__ ( cls : int , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : Tuple = {'''WarmUp''': WarmUp}
return super(UpperCamelCase_ , cls ).from_config(UpperCamelCase_ , custom_objects=UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ):
super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Any = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
    def _decay_weights_op( self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        grads, tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr( self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense( self , grad , var , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config( self ):
        config = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate} )
        return config
    def _do_use_weight_decay( self , param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator(object ):
    def __init__( self ):
        self._gradients = []
        self._accum_steps = None
    @property
    def step( self ):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients( self ):
        if not self._gradients:
            raise ValueError('''The accumulator should be called first to initialize the gradients''' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self , gradients ):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(gradients )}''' )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset( self ):
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
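# --- Usage sketch (added; not part of the original module) -------------------
# Builds a linear-warmup / polynomial-decay schedule plus optimizer.  The
# factory above is named `_snake_case` in this file; TensorFlow must be
# installed for this to run.
if __name__ == "__main__":
    optimizer, lr_schedule = _snake_case(
        init_lr=5E-5,
        num_train_steps=10_000,
        num_warmup_steps=1_000,
        weight_decay_rate=0.01,  # a non-zero rate selects AdamWeightDecay
    )
    # The schedule is callable step by step, e.g. for logging or plotting:
    print(float(lr_schedule(0 ) ), float(lr_schedule(1_000 ) ) )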
| 314
| 1
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int ) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def solution(number_unique_partitions: int = 5000 ) -> int | None:
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
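    # Added sanity check (not in the original solution): 10 has exactly five
    # prime partitions (7+3, 5+5, 5+3+2, 3+3+2+2, 2+2+2+2+2), so the set of
    # distinct prime products returned by partition(10) has five elements.
    assert len(partition(10 ) ) == 5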
| 195
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Union[str, Any] = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowercase__ ( PretrainedConfig ):
    model_type = "canine"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_6384 , type_vocab_size=16 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0xe_000 , eos_token_id=0xe_001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=1_6384 , local_transformer_stride=128 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
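if __name__ == "__main__":
    # Usage sketch (added): the config class above is named `lowercase__` in
    # this file; it behaves like any other PretrainedConfig subclass.
    config = lowercase__(num_hidden_layers=6 , hidden_size=384 )
    print(config.model_type , config.num_hidden_layers )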
| 144
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a_ ( ChunkPipeline ):
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self , image : Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels : Union[str, List[str]] = None , **kwargs , ):
        """simple docstring"""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries" )
        if isinstance(image , (str, Image.Image) ):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess( self , inputs ):
        """simple docstring"""
        image = load_image(inputs["image"] )
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split("," )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward( self , model_inputs ):
        """simple docstring"""
        target_size = model_inputs.pop("target_size" )
        candidate_label = model_inputs.pop("candidate_label" )
        is_last = model_inputs.pop("is_last" )
        outputs = self.model(**model_inputs )
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0] )
                result = {"score": score, "label": label, "box": box}
                results.append(result )
        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self , box : "torch.Tensor" ):
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
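if __name__ == "__main__":
    # Usage sketch (added): the standard way to reach this pipeline is the
    # `pipeline` factory; "google/owlvit-base-patch32" is one compatible
    # checkpoint (network access required).
    from transformers import pipeline

    detector = pipeline(task="zero-shot-object-detection" , model="google/owlvit-base-patch32" )
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg" ,
        candidate_labels=["cat", "remote control"] , )
    print(predictions[:2] )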
| 147
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase : Any ={
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class a_ ( PretrainedConfig ):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__( self , fpn_feature_size : int = 256 , mask_feature_size : int = 256 , no_object_weight : float = 0.1 , use_auxiliary_loss : bool = False , backbone_config : Optional[Dict] = None , decoder_config : Optional[Dict] = None , init_std : float = 0.02 , init_xavier_std : float = 1.0 , dice_weight : float = 1.0 , cross_entropy_weight : float = 1.0 , mask_weight : float = 20.0 , output_auxiliary_logits : Optional[bool] = None , **kwargs , ):
        """simple docstring"""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                F'Supported model types: {",".join(self.backbones_supported )}' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type" ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    F'Transformer Decoder {decoder_type} not supported, please use one of'
                    F' {",".join(self.decoders_supported )}' )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
@classmethod
    def from_backbone_and_decoder_configs( cls , backbone_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
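if __name__ == "__main__":
    # Usage sketch (added): this module mirrors transformers' MaskFormerConfig,
    # so the same composition pattern works against the installed library.
    from transformers import DetrConfig, MaskFormerConfig, SwinConfig

    backbone = SwinConfig(embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] )
    decoder = DetrConfig()
    config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone , decoder )
    print(config.to_dict()["model_type"] )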
| 147
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class _UpperCAmelCase ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop('''add_bos_token''', False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self, *args, **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''', False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus( self, *args, **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''', False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self, conversation: "Conversation" ):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
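if __name__ == "__main__":
    # Usage sketch (added): this class mirrors transformers' GPT2TokenizerFast;
    # "gpt2" below is the standard hub identifier (network access required).
    from transformers import GPT2TokenizerFast

    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    encoded = tok("hello world")
    print(encoded["input_ids"], tok.decode(encoded["input_ids"]))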
| 207
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 207
| 1
|
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
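if __name__ == "__main__":
    # Usage sketch (added): these re-exports are normally consumed from the
    # package root; constructing a small randomly initialised model is enough
    # to smoke-test the import surface (requires torch).
    from diffusers import UNet2DModel

    model = UNet2DModel()
    print(sum(p.numel() for p in model.parameters()))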
| 144
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def get_user_input( ) -> Tuple:
    '''simple docstring'''
    compute_environment = _ask_options(
        "In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser( subparsers: Union[str, Any]=None) -> Union[str, Any]:
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=_a)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=_a)
    parser.add_argument(
        "--config_file", default=None, help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
), )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command( args: Dict) -> str:
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F"""accelerate configuration saved at {config_file}""")
def main( ) -> Optional[Any]:
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
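# Usage sketch (added; not part of the original module): the same flow is
# normally reached through the CLI, e.g.
#
#   accelerate config --config_file my_config.yaml
#
# and programmatically the parser built above can be driven directly:
#
#   parser = config_command_parser()
#   args = parser.parse_args(["--config_file", "my_config.yaml"])
#   config_command(args)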
| 144
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class snake_case_:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='''Translation''' , init=False , repr=False )
def __call__( self : Union[str, Any] ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowerCamelCase__ ( self : List[Any] ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class snake_case_:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='''TranslationVariableLanguages''' , init=False , repr=False )
    def __post_init__( self ):
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ):
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                F'''Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).''' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
def lowerCamelCase__ ( self : Dict ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
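if __name__ == "__main__":
    # Usage sketch (added): these dataclasses mirror the `datasets` library's
    # Translation feature, so the upstream names illustrate the intended use.
    from datasets import Dataset, Features
    from datasets.features import Translation

    features = Features({"translation": Translation(languages=["en", "fr"] )} )
    ds = Dataset.from_dict({"translation": [{"en": "the cat", "fr": "le chat"}]} , features=features )
    print(ds[0] )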
| 60
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _snake_case ( matrix : list[list[float]] ):
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
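if __name__ == "__main__":
    # Quick check (added): the inverse of [[2, 5], [1, 3]] (determinant 1)
    # is [[3, -5], [-1, 2]].
    print(_snake_case([[2.0, 5.0], [1.0, 3.0]] ) )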
| 60
| 1
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + "\n" )
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer( self , **kwargs_init ):
        '''simple docstring'''
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        '''simple docstring'''
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder( self , **kwargs ):
        '''simple docstring'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : str = self.get_tokenizer()
A_ : List[str] = self.get_feature_extractor()
A_ : Optional[Any] = self.get_decoder()
A_ : Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
A_ : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__snake_case , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = self.get_feature_extractor()
A_ : int = self.get_tokenizer()
A_ : str = self.get_decoder()
A_ : Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
A_ : List[str] = floats_list((3, 1_000) )
A_ : Any = feature_extractor(__snake_case , return_tensors="np" )
A_ : List[Any] = processor(__snake_case , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : str = self.get_feature_extractor()
A_ : Dict = self.get_tokenizer()
A_ : Dict = self.get_decoder()
A_ : str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
A_ : List[Any] = "This is a test string"
A_ : Dict = processor(text=__snake_case )
A_ : Optional[Any] = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :List[Any]=(2, 10, 16) , snake_case :List[Any]=77 ):
'''simple docstring'''
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = self.get_feature_extractor()
A_ : Any = self.get_tokenizer()
A_ : Union[str, Any] = self.get_decoder()
A_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
A_ : Union[str, Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
A_ : List[str] = processor.decode(__snake_case )
A_ : Dict = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = self.get_feature_extractor()
A_ : Optional[int] = self.get_tokenizer()
A_ : Optional[int] = self.get_decoder()
A_ : Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
A_ : Any = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A_ : Optional[Any] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
A_ : Union[str, Any] = processor.batch_decode(__snake_case , __snake_case )
A_ : int = list(__snake_case )
with get_context("fork" ).Pool() as p:
A_ : Tuple = decoder.decode_beams_batch(__snake_case , __snake_case )
A_ , A_ , A_ : Union[str, Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.get_feature_extractor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Tuple = self.get_decoder()
A_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
A_ : List[Any] = self._get_dummy_logits()
A_ : List[str] = 15
A_ : Optional[int] = -20.0
A_ : Dict = -4.0
A_ : Optional[int] = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
A_ : Optional[Any] = decoded_processor_out.text
A_ : List[str] = list(__snake_case )
with get_context("fork" ).Pool() as pool:
A_ : Union[str, Any] = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
A_ : int = [d[0][0] for d in decoded_decoder_out]
A_ : int = [d[0][2] for d in decoded_decoder_out]
A_ : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1e-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : List[str] = self.get_feature_extractor()
A_ : List[str] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_decoder()
A_ : int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
A_ : Optional[Any] = self._get_dummy_logits()
A_ : str = 2.0
A_ : str = 5.0
A_ : Any = -20.0
A_ : List[str] = True
A_ : Optional[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
A_ : Tuple = decoded_processor_out.text
A_ : List[Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("fork" ).Pool() as pool:
A_ : List[Any] = decoder.decode_beams_batch(
__snake_case , __snake_case , )
A_ : List[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __snake_case )
A_ : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A_ : Dict = processor.decoder.model_container[processor.decoder._model_key]
A_ : Union[str, Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
A_ : List[Any] = os.listdir(__snake_case )
A_ : Any = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Dict = snapshot_download("hf-internal-testing/processor_with_lm" )
A_ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
A_ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
A_ : Optional[Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
A_ : int = os.listdir(__snake_case )
A_ : Any = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case , __snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A_ : int = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
A_ : Any = floats_list((3, 1_000) )
A_ : str = processor_wavaveca(__snake_case , return_tensors="np" )
A_ : Optional[int] = processor_auto(__snake_case , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
A_ : Tuple = self._get_dummy_logits()
A_ : List[Any] = processor_wavaveca.batch_decode(__snake_case )
A_ : int = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : Optional[int] = self.get_feature_extractor()
A_ : Any = self.get_tokenizer()
A_ : int = self.get_decoder()
A_ : Any = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def SCREAMING_SNAKE_CASE ( snake_case :Optional[int] , snake_case :Optional[Any] ):
'''simple docstring'''
A_ : Any = [d[key] for d in offsets]
return retrieved_list
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A_ : List[str] = self._get_dummy_logits()[0]
A_ : List[str] = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A_ : List[Any] = self._get_dummy_logits()
A_ : List[Any] = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__snake_case , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
import torch
A_ : Tuple = load_dataset("common_voice" , "en" , split="train" , streaming=__snake_case )
A_ : List[str] = ds.cast_column("audio" , datasets.Audio(sampling_rate=16_000 ) )
A_ : Tuple = iter(__snake_case )
A_ : List[str] = next(__snake_case )
A_ : int = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
A_ : Dict = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A_ : str = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
A_ : Dict = model(__snake_case ).logits.cpu().numpy()
A_ : Tuple = processor.decode(logits[0] , output_word_offsets=__snake_case )
A_ : List[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A_ : Tuple = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
A_ : Dict = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__snake_case , "word" ) ) , __snake_case )
self.assertEqual(" ".join(self.get_from_offsets(__snake_case , "word" ) ) , output.text )
# output times
A_ : Dict = torch.tensor(self.get_from_offsets(__snake_case , "start_time" ) )
A_ : Optional[int] = torch.tensor(self.get_from_offsets(__snake_case , "end_time" ) )
# fmt: off
A_ : Dict = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
A_ : Any = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
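# Added note (not part of the original tests): these cases require the optional
# `pyctcdecode` and `kenlm` packages; a single case can be exercised with e.g.
#
#   python -m unittest <this_test_module> -k batch_decode
#
# (the module path is environment-specific and left unspecified here).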
| 370
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """simple docstring"""
    data: int
    next_node: Node | None
class SortedLinkedList:
    """simple docstring"""
    def __init__( self , ints : Iterable[int] ):
        '''simple docstring'''
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__( self ):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self ):
        '''simple docstring'''
        return sum(1 for _ in self )
    def __str__( self ):
        '''simple docstring'''
        return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one : SortedLinkedList , sll_two : SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
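    # Added check: merging two sorted lists yields the fully sorted union.
    assert list(merge_lists(SSL(test_data_odd ) , SSL(test_data_even ) ) ) == sorted(
        test_data_odd + test_data_even )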
| 70
| 0
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ViltImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ):
        '''simple docstring'''
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
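if __name__ == "__main__":
    # Usage sketch (added): mirrors transformers' ViltProcessor; assumes the
    # "dandelin/vilt-b32-finetuned-vqa" checkpoint, Pillow and network access.
    import requests
    from PIL import Image
    from transformers import ViltProcessor

    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    encoding = processor(image , "How many cats are there?" , return_tensors="pt" )
    print(sorted(encoding.keys() ) )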
| 182
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase__ ( unittest.TestCase):
    def setUp( self ):
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def __A ( self : Dict ):
        '''simple docstring'''
        config = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(config , CLIPImageProcessor )
    def __A ( self : Dict ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
    def __A ( self : int ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
def __A ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : List[str] = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE : str = CLIPImageProcessor(**UpperCamelCase__ )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
config.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            config = AutoImageProcessor.from_pretrained(processor_tmpfile )
            self.assertIsInstance(config , CLIPImageProcessor )
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError , '''clip-base is not a local folder and is not a valid model identifier''' ):
            config = AutoImageProcessor.from_pretrained('''clip-base''' )
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            config = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            config = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir )
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                    self.assertIsInstance(new_image_processor , CustomImageProcessor )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(image_processor , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
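# A minimal usage sketch of the registration flow exercised above (CustomConfig and
# CustomImageProcessor come from the test_module helpers; the checkpoint path is hypothetical):
#     AutoConfig.register("custom", CustomConfig)
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     image_processor = AutoImageProcessor.from_pretrained("/path/to/custom_checkpoint")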
| 182
| 1
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard  # lz4 / zstd availability markers
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = f"""s3://{mock_bucket}"""
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith('''s3://''' ) is False
    new_dataset_path = '''./local/path'''
    dataset_path = extract_path_from_uri(new_dataset_path )
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem('''file''' )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , COMPRESSION_FILESYSTEMS )
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"""for '{compression_fs_class.protocol}' compression protocol, """
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex('''.''' )]
    assert fs.glob('''*''' ) == [expected_filename]
    with fs.open(expected_filename , '''r''' , encoding='''utf-8''' ) as f, open(text_file , encoding='''utf-8''' ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
    fs, *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
    assert hffs.isdir('''data''' )
    assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
    with open(text_file ) as f:
        assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def test_fs_overwrites():
    protocol = '''bz2'''
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
assert (
str(warning_info[0].message )
== f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
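# Illustrative sketch of the chained-URL convention these tests exercise (archive path hypothetical):
#     fs, *_ = fsspec.get_fs_token_paths("zip://dataset.jsonl::/path/to/archive.zip")
#     fs.isfile("dataset.jsonl")  # True when the member exists inside the archive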
| 238
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['''data_utils'''] = data_utils
sys.modules['''vocabulary'''] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , '''rb''' ) as fp:
            corpus = pickle.load(fp , encoding='''latin1''' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(f"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
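# Example invocation (script name and paths are illustrative):
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl-pt \
#         --transfo_xl_dataset_file ./corpus-info.pkl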
| 238
| 1
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job, distributing the remainder to the first groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into one dict per job, slicing each data sources list per group."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge the gen_kwargs of several jobs back into a single dict."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle all the data sources lists; lists of the same size get the same shuffling."""
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
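# Worked example for the helpers above: 10 shards over 3 jobs split as evenly as
# possible (4, 3, 3), the remainder going to the first groups.
#     gen_kwargs = {"filepaths": [f"data_{i}.txt" for i in range(10)], "split": "train"}
#     _number_of_shards_in_gen_kwargs(gen_kwargs)            # -> 10
#     _distribute_shards(num_shards=10, max_num_jobs=3)      # -> [range(0, 4), range(4, 7), range(7, 10)]
#     len(_split_gen_kwargs(gen_kwargs, max_num_jobs=3))     # -> 3, with "filepaths" sliced per group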
| 15
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__)
class MockDownloadManager:
    '''simple docstring'''
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False
    def __init__(self , dataset_name: str , config: str , version: Union[Version, str] , cache_dir: Optional[str] = None , use_local_dummy_data: bool = False , load_existing_dummy_data: bool = True , download_callbacks: Optional[List[Callable]] = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self ):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder(self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("dummy" , self.version_name )
    @property
    def dummy_zip_file(self ):
        return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
    def download_dummy_data(self ):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
    @property
    def local_path_to_dummy_data(self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
    @property
    def github_path_to_dummy_data(self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
        return self._bucket_url
    @property
    def manual_dir(self ):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
    def download_and_extract(self , data_url , *args ):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict ):
            return self.create_dummy_data_dict(dummy_file , data_url )
        elif isinstance(data_url , (list, tuple) ):
            return self.create_dummy_data_list(dummy_file , data_url )
        else:
            return self.create_dummy_data_single(dummy_file , data_url )
    def download(self , data_url , *args ):
        return self.download_and_extract(data_url )
    def download_custom(self , data_url , custom_download ):
        return self.download_and_extract(data_url )
    def extract(self , path , *args , **kwargs ):
        return path
    def get_recorded_sizes_checksums(self ):
        return {}
    def create_dummy_data_dict(self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single(self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self ):
        pass
    def manage_extracted_files(self ):
        pass
    def iter_archive(self , path ):
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob("*" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__") ):
                yield file_path.relative_to(path ).as_posix(), file_path.open("rb" )
    def iter_files(self , paths ):
        if not isinstance(paths , list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith((".", "__") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith((".", "__") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith((".", "__") ):
                            continue
                        yield os.path.join(dirpath , filename )
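# Sketch of how a dataset test typically drives this manager (dataset name and URL are illustrative):
#     dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
#     paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})
#     # -> {"train": "<dummy_data_dir>/train.json"}, resolved inside dummy_data.zip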
| 15
| 1
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self) -> Any:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
    @require_multi_gpu
    def test_multi_gpu(self) -> Dict:
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_multi_gpu_ops(self) -> str:
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(f'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_pad_across_processes(self) -> Optional[Any]:
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_distributed_data_loop(self) -> Any:
        print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
accelerator = Accelerator()
shape = (accelerator.state.process_index + 2, 10)
tensor = torch.randint(0, 10, shape).to(accelerator.device)
error_msg = ""
tensor1 = accelerator.pad_across_processes(tensor)
if tensor1.shape[0] != accelerator.state.num_processes + 1:
    error_msg += F"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
    error_msg += "Tensors have different values."
if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
    error_msg += "Padding was not done with the right value (0)."
tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
if tensor2.shape[0] != accelerator.state.num_processes + 1:
    error_msg += F"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensor2[index:], tensor):
    error_msg += "Tensors have different values."
if not torch.all(tensor2[:index] == 0):
    error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
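# This __main__ body is what test_pad_across_processes launches via torchrun: each rank
# builds a (process_index + 2, 10) tensor, so padding must grow dim 0 to num_processes + 1.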
| 362
|
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1''' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
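# Example: binary_and(25, 32) zero-fills both operands to six bits (011001 and 100000)
# and ANDs them position by position, giving "0b000000"; binary_and(25, 37) gives "0b000001".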
| 339
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}' )
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
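# Example invocation (script name and paths are illustrative):
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./funnel/model.ckpt \
#         --config_file ./funnel/config.json \
#         --pytorch_dump_path ./funnel-pt/pytorch_model.bin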
| 83
|
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits (1s) in the binary representation of a non-negative integer."""
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
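# Brian Kernighan's trick: number &= number - 1 clears the lowest set bit, so the loop
# runs once per set bit rather than once per bit. Example: get_set_bits_count(25)  # 0b11001 -> 3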
| 306
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel as Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 1_2
    @property
    def num_embeds_ada_norm(self):
        return 1_2
    @property
    def text_embedder_hidden_size(self):
        return 3_2
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 1_2
        width = 1_2
        model_kwargs = {
            """attention_bias""": True,
            """cross_attention_dim""": 3_2,
            """attention_head_dim""": height * width,
            """num_attention_heads""": 1,
            """num_vector_embeds""": self.num_embed,
            """num_embeds_ada_norm""": self.num_embeds_ada_norm,
            """norm_num_groups""": 3_2,
            """sample_size""": width,
            """activation_fn""": """geglu-approximate""",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        expected_slice = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        expected_slice = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""")
        pipeline = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image).max() < 2.0
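# Minimal end-to-end sketch mirroring the integration test above (a CUDA device is assumed):
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
#     image = pipe("teddy bear playing in the pool", num_images_per_prompt=1).images[0]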
| 255
|
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **calc_rouge_kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
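# Fire exposes the function signature as a CLI; an illustrative call (file names hypothetical):
#     python rouge_cli.py predictions.txt gold.txt --save_path rouge_metrics.json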
| 255
| 1
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCAmelCase : Optional[int] ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_UpperCAmelCase : List[Any] ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_UpperCAmelCase : Dict ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean() )
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """Compute the per-question macro-F1 (f1_m), average F1 over all answers (f1_a) and exact match for MultiRC."""
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
        pred = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='''macro''' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    '''simple docstring'''
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='''macro''' )
        elif self.config_name == "record":
            dataset = [
                {
                    '''qas''': [
                        {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 262
|
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    '''simple docstring'''
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False
    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f"""{backend} is not in the deps table!"""
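# The convention checked above: a dummy object's _backends entry uses the import name
# (e.g. "k_diffusion"), which must map to the pip package name ("k-diffusion") listed in deps.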
| 262
| 1
|
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
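# Example: maximum_non_adjacent_sum([1, 2, 4, 5]) -> 7 (pick 2 and 5); the tuple update keeps
# the best sums that include and exclude the current element, so no two picks are adjacent.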
| 80
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
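# solution(n) evaluates the central binomial coefficient C(2n, n), the number of lattice
# paths through an n x n grid; the default n = 20 yields 137846528820.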
| 80
| 1
|
import numpy as np
def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
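# Example: elu(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3) keeps positives unchanged and maps
# negatives to alpha * (exp(x) - 1), e.g. -2.0 -> roughly -0.259.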
| 186
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Image" , init=False , repr=False )
    def __call__(self ):
        return self.pa_type
    def encode_example(self , value )->dict:
        '''simple docstring'''
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example(self , value , token_per_repo_id=None )->"PIL.Image.Image":
        '''simple docstring'''
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split('''::''' )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['''repo_id''']
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , '''rb''' , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self )->Union["FeatureType", Dict[str, "FeatureType"]]:
        '''simple docstring'''
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value('''binary''' ),
                "path": Value('''string''' ),
            }
        )
    def cast_storage(self , storage )->pa.StructArray:
        '''simple docstring'''
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                bytes_array = storage.field('''bytes''' )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                path_array = storage.field('''path''' )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage(self , storage )->pa.StructArray:
        '''simple docstring'''
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , '''rb''' ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats():
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image ):
    """Convert a PIL Image object to bytes using native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image ):
    if hasattr(image , '''filename''' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array ):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('''|u1''' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs ):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
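# Typical end-user sketch for this feature (file path hypothetical):
#     from datasets import Dataset, Image
#     ds = Dataset.from_dict({"img": ["cat.png"]}).cast_column("img", Image())
#     ds[0]["img"]  # lazily decoded into a PIL.Image.Image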
| 186
| 1
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
| 360
|
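The dummy retrievers above all rely on the same inner-product geometry. A standalone sketch (assuming faiss is installed) of why the all-ones query hits doc 1 and the minus-ones query hits doc 0:

import faiss
import numpy as np

d = 8  # retrieval_vector_size in the tests
index = faiss.IndexFlatIP(d)  # exact inner-product index, like string_factory="Flat"
index.add(np.stack([np.ones(d), 2 * np.ones(d)]).astype("float32"))

queries = np.stack([np.ones(d), -np.ones(d)]).astype("float32")
scores, ids = index.search(queries, 1)
print(ids.tolist())  # [[1], [0]] -- matches assertListEqual(doc_ids.tolist(), [[1], [0]])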
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
| 171
| 0
|
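A quick sanity check of the predicate and search above: 49/98 "cancels" the 9s and still equals 4/8, and the two-digit search recovers the four classic non-trivial fractions.

print(is_digit_cancelling(49, 98))  # True: 49/98 == 4/8
print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']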
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'{solution() = }')
| 10
|
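A worked instance of the candidate test above: integer = 3 gives the candidate (3**2 - 1) / 4 = 2, and 2 passes the perfection check because sqrt(4*2 + 1)/2 + 1/2 = 2 and log2(2) = 1 is an integer.

import math

candidate = (3**2 - 1) / 4
print(candidate)  # 2.0
exponent = math.log2(math.sqrt(4 * int(candidate) + 1) / 2 + 1 / 2)
print(exponent == int(exponent))  # True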
def bfs(graph, s, t, parent) -> bool:
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 295
| 0
|
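Usage of ford_fulkerson on a second, smaller network (a hypothetical example, not part of the original snippet): two vertex-disjoint augmenting paths of capacity 3 and 2 give a max flow of 5. Note that the function mutates the capacity matrix in place.

small_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 3],
    [0, 0, 0, 2],
    [0, 0, 0, 0],
]
print(ford_fulkerson(small_graph, 0, 3))  # 5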
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 358
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 69
| 0
|
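The converter can also be called programmatically instead of via the CLI; the paths below are placeholders, not real files.

convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/transfo_xl_checkpoint",   # hypothetical path
    transfo_xl_config_file="",     # empty string falls back to a default TransfoXLConfig
    pytorch_dump_folder_path="/path/to/output",            # hypothetical path
    transfo_xl_dataset_file="",    # empty string skips the corpus conversion branch
)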
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 9
|
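A sketch of the timm/transformers equivalence the test above exercises; it downloads checkpoints, so it needs network access plus timm installed.

from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
print(timm_backbone.channels == hf_backbone.channels)  # True, per the test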
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 47
| 0
|
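The expected_hidden_dim asserted throughout these tests is just the flattened patch pixels plus two positional slots (a row id and a column id prepended to each patch):

patch_height, patch_width, num_channels = 16, 16, 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2
print(expected_hidden_dim)  # 770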
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
| 355
|
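The file-lock trick in printflock stands on its own: each process takes an exclusive flock on the script file while printing, so concurrent output never interleaves. A minimal sketch (the diagnostic itself would typically be launched with python -m torch.distributed.run --nproc_per_node=N):

import fcntl

def locked_print(*msgs):
    # Hold an exclusive lock on this file for the duration of the print call.
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)

locked_print("hello, exactly one line at a time")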
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 226
| 0
|
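The same task can be done with the higher-level pipeline API as an alternative to the manual fill_mask above:

from transformers import pipeline

camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
for result in camembert_fill_mask("Le camembert est <mask> :)"):
    print(result["token_str"], result["score"])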