| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    """Pipeline that transcribes speech with Whisper and renders the transcription with Stable Diffusion."""

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head count is a reasonable default trade-off
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # passing `None` restores full (unsliced) attention computation
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio with Whisper and use the transcription as the prompt
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # scale and decode the image latents with the VAE (0.18215 is SD's latent scaling factor)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
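# --- Usage sketch (editor's addition, not part of the original file). One plausible
# way to drive the pipeline above is via diffusers' community-pipeline loader; the
# checkpoint ids and the dummy audio dataset below are illustrative assumptions.
import torch
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
audio_sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[3]["audio"]

diffuser_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",  # community pipeline matching the class above
    speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
    speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
    torch_dtype=torch.float16,
).to(device)

output = diffuser_pipeline(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"])
output.images[0].save("speech_to_image.png")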
| 701 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary-search the smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v in O(n log n)."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a better candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
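# Worked trace (editor's addition) of `tail` for [2, 5, 3, 7, 11, 8, 10, 13, 6]:
#   2 -> [2]; 5 -> [2, 5]; 3 -> [2, 3]; 7 -> [2, 3, 7]; 11 -> [2, 3, 7, 11];
#   8 -> [2, 3, 7, 8]; 10 -> [2, 3, 7, 8, 10]; 13 -> [2, 3, 7, 8, 10, 13];
#   6 -> [2, 3, 6, 8, 10, 13]  (length stays 6)
assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6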
| 62 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
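# Standalone illustration (editor's addition) of the BPE behaviour the tests above
# assert. The vocab/merges contents mirror setUp; the file paths are hypothetical.
if __name__ == "__main__":
    tok = BlenderbotSmallTokenizer(
        vocab_file="vocab.json",  # written exactly as in setUp above
        merges_file="merges.txt",
        unk_token="__unk__",
        bos_token="__start__",
        eos_token="__end__",
    )
    print(tok.tokenize("adapt act apte"))  # -> ['adapt', 'act', 'ap@@', 'te']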
| 702 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 62 | 0 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Apply the Burrows-Wheeler transform to s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform, given the BWT string and the index of the original rotation."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            # prepend the BWT column; after len(bwt_string) sorted passes the table
            # holds every full rotation in sorted order
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__A = "Provide a string that I will generate its BWT transform: "
__A = input(entry_msg).strip()
__A = bwt_transform(s)
print(
f'Burrows Wheeler transform for string \'{s}\' results '
f'in \'{result["bwt_string"]}\''
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
f'we get original string \'{original_string}\''
)
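# Round-trip sanity check (editor's addition), using the classic "^BANANA" example:
# the sorted rotations of "^BANANA" end in the column "BNN^AAA", and the original
# string sits at index 6 of the sorted rotation table.
assert bwt_transform("^BANANA") == {"bwt_string": "BNN^AAA", "idx_original_string": 6}
assert reverse_bwt("BNN^AAA", 6) == "^BANANA"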
| 703 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb Top 250 chart and return a {title: rating} mapping."""
    base_url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(base_url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """Write the scraped chart to a CSV file with title and rating columns."""
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 62 | 0 |
def text_justification(word: str, max_width: int) -> list[str]:
    """Greedily fill each line with words, then pad with spaces so every line is exactly max_width wide."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
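# Worked example (editor's addition): lines are packed while
# width + len(word) + len(line) <= max_width, leftover spaces are distributed
# round-robin from the left, and the final line is left-justified.
assert text_justification("The quick brown fox", 10) == ["The  quick", "brown fox "]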
| 704 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 62 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    # NOTE: the left-hand attribute names were stripped in this dump; they are
    # reconstructed here from the public DPTConfig attributes.
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    # NOTE: the target key names were stripped in this dump; they are reconstructed
    # from the DPT encoder layer naming scheme.
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak the original checkpoint's weights into our DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
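# Minimal sketch (editor's addition): building a config and inspecting the dynamic
# axes an ONNX export would use. The task name is one of the standard OnnxConfig
# tasks; everything else comes from the classes above.
if __name__ == "__main__":
    config = RobertaConfig()
    onnx_config = RobertaOnnxConfig(config, task="sequence-classification")
    print(onnx_config.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])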
| 62 | 0 |
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount each cash flow at the given rate and return the rounded sum (the net present value)."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
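# Worked example (editor's addition): an initial outlay of 100 followed by two
# inflows of 60, discounted at 10%:
#   -100 + 60/1.1 + 60/1.21 = -100 + 54.55 + 49.59 = 4.13 (rounded to 2 digits)
assert present_value(0.10, [-100, 60, 60]) == 4.13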
| 706 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]:
_A = self.eval_dataset
_A = self.get_eval_dataloader(__A )
_A = next(iter(__A ) )
# saving device - to make it consistent
_A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
_A = tuple(v.to(__A ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
_A = True
_A = self.model.to(__A )
model.eval()
model.float()
_A = model.module if hasattr(__A , '''module''' ) else model
quant_trainer.configure_model(__A , self.quant_trainer_args )
_A = os.path.join(__A , '''model.onnx''' )
logger.info(f"""exporting model to {output_model_file}""" )
_A = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__A , )
logger.info('''onnx export finished''' )
| 62 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_lowercase , )
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
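# Usage sketch (added): with _LazyModule installed in sys.modules, importing this
# package stays cheap; the torch-backed classes listed above are only imported the
# first time an attribute such as `MegaModel` is actually accessed.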
| 62 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Decoder configuration mirroring the upstream TrOCRConfig."""

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=5_02_65,
        d_model=10_24,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=40_96,
        activation_function="gelu",
        max_position_embeddings=5_12,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
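# Usage sketch (added): the class above mirrors the upstream TrOCRConfig, so a
# smaller decoder for experiments could be configured as (names as defined here):
#
#   config = SCREAMING_SNAKE_CASE(d_model=256, decoder_layers=4, decoder_attention_heads=8)
#   assert config.num_hidden_layers == 4  # resolved through attribute_map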
| 708 |
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator:
    """Yield successive fixed-size chunks from an iterable."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case, strip non-letters, and separate doubled letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list:
    # I and J are combined so the 26-letter alphabet fits a 5x5 square
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
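if __name__ == "__main__":
    # Usage sketch (added): round-trip with an assumed key. decode() returns the
    # *prepared* plaintext (upper-case, letters only, doubled letters split by X).
    key = "MONARCHY"
    message = "secret message"
    ciphertext = encode(message, key)
    print(f"encoded: {ciphertext}")
    print(f"decoded: {decode(ciphertext, key)}")
    assert decode(ciphertext, key) == prepare_input(message)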
| 62 | 0 |
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz starting at `number`, for rounds up to `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
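    # Usage sketch (added): the helper returns one space-separated string.
    print(fizz_buzz(1, 15))
    # -> 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz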
| 709 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_token_type_ids
_A = use_input_mask
_A = use_labels
_A = use_mc_token_ids
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
_A = self.vocab_size - 1
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
if self.use_mc_token_ids:
_A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
_A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self: Optional[int] ) -> List[Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]:
_A = CTRLModel(config=__A )
model.to(__A )
model.eval()
model(__A , token_type_ids=__A , head_mask=__A )
model(__A , token_type_ids=__A )
_A = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str:
_A = CTRLLMHeadModel(__A )
model.to(__A )
model.eval()
_A = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self: Optional[int] ) -> Dict:
_A = self.prepare_config_and_inputs()
(
(
_A
) ,(
_A
) ,(
_A
) ,(
_A
) ,(
_A
) ,(
_A
) ,(
_A
) ,(
_A
) ,(
_A
) ,
) = config_and_inputs
_A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any:
_A = self.num_labels
_A = CTRLForSequenceClassification(__A )
model.to(__A )
model.eval()
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
A_ = (CTRLLMHeadModel,) if is_torch_available() else ()
A_ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __A ( self: Any ) -> Union[str, Any]:
_A = CTRLModelTester(self )
_A = ConfigTester(self , config_class=__A , n_embd=37 )
def __A ( self: Optional[int] ) -> List[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __A ( self: Dict ) -> Any:
self.config_tester.run_common_tests()
def __A ( self: str ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__A )
def __A ( self: List[str] ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: Optional[Any] ) -> int:
pass
@slow
def __A ( self: Tuple ) -> Dict:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = CTRLModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def __A ( self: Any ) -> Union[str, Any]:
pass
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: int ) -> Union[str, Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __A ( self: Any ) -> Any:
_A = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(__A )
_A = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is
_A = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
_A = model.generate(__A , do_sample=__A )
self.assertListEqual(output_ids[0].tolist() , __A )
| 62 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Tuple , __A: int , __A: Dict=12 , __A: Tuple=7 , __A: Union[str, Any]=True , __A: List[str]=True , __A: List[str]=True , __A: Tuple=99 , __A: Dict=32 , __A: Tuple=32 , __A: List[Any]=2 , __A: Optional[int]=4 , __A: str=37 , __A: Any=0.1 , __A: List[Any]=0.1 , __A: Dict=5_12 , __A: Tuple=0.02 , __A: List[Any]=0 , __A: Optional[Any]=None , ) -> List[Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = dropout
_A = attention_dropout
_A = max_position_embeddings
_A = initializer_range
_A = scope
_A = bos_token_id
def __A ( self: Optional[int] ) -> str:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_A = input_mask.numpy()
_A = input_mask.shape
_A = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase_ ):
_A = 1
_A = 0
_A = self.get_config()
return config, input_ids, tf.convert_to_tensor(UpperCamelCase_ )
def __A ( self: Optional[int] ) -> List[Any]:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __A ( self: Union[str, Any] , __A: Dict , __A: List[str] , __A: List[Any] ) -> Union[str, Any]:
_A = TFBlipTextModel(config=UpperCamelCase_ )
_A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , training=UpperCamelCase_ )
_A = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self: Optional[Any] ) -> Dict:
_A = self.prepare_config_and_inputs()
_A = config_and_inputs
_A = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (TFBlipTextModel,) if is_tf_available() else ()
A_ = False
A_ = False
A_ = False
def __A ( self: Optional[int] ) -> Dict:
_A = BlipTextModelTester(self )
_A = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def __A ( self: List[str] ) -> str:
self.config_tester.run_common_tests()
def __A ( self: List[str] ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __A ( self: Dict ) -> Union[str, Any]:
pass
def __A ( self: List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def __A ( self: Tuple ) -> Dict:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def __A ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def __A ( self: Tuple ) -> List[str]:
pass
@slow
def __A ( self: List[str] ) -> List[str]:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFBlipTextModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def __A ( self: Tuple , __A: int=True ) -> Optional[Any]:
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase_ )
| 710 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Return the post-order of a depth-first search started at `vert`."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: DFS post-order on `graph`, then DFS on its reverse."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
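if __name__ == "__main__":
    # Usage sketch (added): vertices 0, 1, 2 form a cycle in test_graph_1, so they
    # collapse into one strongly connected component; 3 and 4 stand alone.
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]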
| 62 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of references, one for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string'''),
                    '''references''': datasets.Value('''string'''),
                }
            ),
            homepage='''https://github.com/hendrycks/math''',
            codebase_urls=['''https://github.com/hendrycks/math'''],
        )

    def _compute(self, predictions, references):
        """Return the accuracy after canonicalizing both sides."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 711 |
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0-1 knapsack using the global table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0-1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            '''Both the weights and values vectors must be either lists or tuples'''
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            '''The number of weights must be the same as the number of values.\n'''
            f"""But got {num_items} weights and {len(val)} values"""
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                '''All weights must be integers but got weight of '''
                f"""type {type(wt[i])} at index {i}"""
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, adding item `i` whenever it contributed."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
| 62 | 0 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the first digits against the prefixes used by the major issuers."""
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6'''))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a diagnostic and return whether the card number is valid."""
    error_message = f"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(f"""{error_message} it has nonnumerical characters.""")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"""{error_message} of its length.""")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"""{error_message} of its first two digits.""")
        return False
    if not luhn_validation(credit_card_number):
        print(f"""{error_message} it fails the Luhn check.""")
        return False
    print(f"""{credit_card_number} is a valid credit card number.""")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
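    # Usage sketch (added): flipping the last digit of the classic Visa test
    # number 4111111111111111 breaks the Luhn checksum.
    validate_credit_card_number('4111111111111112')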
| 712 |
def solution(n: int = 1_000_000) -> int:
    """Find the starting number below `n` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
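    # Usage sketch (added): with the default limit of one million, solution()
    # returns 837799, the widely published Project Euler #14 answer
    # (the run takes a few seconds).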
| 62 | 0 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reverse the four 8-char groups of a 32-char string (big- to little-endian)."""
    if len(string_32) != 32:
        raise ValueError('''Input must be of length 32''')

    little_endian = b''''''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as the little-endian hex used in the digest."""
    if i < 0:
        raise ValueError('''Input must be non-negative''')

    hex_rep = format(i, '''08x''')[-8:]
    little_endian_hex = b''''''
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode('''utf-8''')
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append the original length."""
    bit_string = b''''''
    for char in message:
        bit_string += format(char, '''08b''').encode('''utf-8''')
    start_len = format(len(bit_string), '''064b''').encode('''utf-8''')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes):
    """Yield each 512-bit block as a list of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''')

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT of a 32-bit unsigned integer."""
    if i < 0:
        raise ValueError('''Input must be non-negative''')

    i_str = format(i, '''032b''')
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit unsigned integer left by `shift` bits."""
    if i < 0:
        raise ValueError('''Input must be non-negative''')
    if shift < 0:
        raise ValueError('''Shift must be non-negative''')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Compute the MD5 digest of `message` as a bytes hex string."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
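    # Usage sketch (added): cross-check one digest against the stdlib reference.
    import hashlib

    sample = b"hello world"
    assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")
    print(md5_me(sample))  # b'5eb63bbbe01eeed093cb22bb8f5acdc3'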
| 713 |
def text_justification(word: str, max_width: int) -> list:
    """Greedy line fill followed by full justification of each completed line."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(''' '''.join(line) + (remaining_spaces + 1) * ''' ''')
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
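    # Usage sketch (added): the classic example; every output line is exactly 16 chars.
    print(text_justification("This is an example of text justification.", 16))
    # ['This    is    an', 'example  of text', 'justification.  ']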
| 62 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Any , __A: Optional[Any] , __A: Any=2 , __A: Tuple=True , __A: Dict=False , __A: int=10 , __A: Optional[Any]=3 , __A: Union[str, Any]=32 * 4 , __A: Dict=32 * 6 , __A: List[Any]=4 , __A: Union[str, Any]=32 , ) -> str:
_A = parent
_A = batch_size
_A = is_training
_A = use_auxiliary_loss
_A = num_queries
_A = num_channels
_A = min_size
_A = max_size
_A = num_labels
_A = mask_feature_size
def __A ( self: List[str] ) -> List[Any]:
_A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
_A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase )
_A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5
).float()
_A = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long()
_A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __A ( self: Dict ) -> List[Any]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __A ( self: Any ) -> int:
_A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs()
_A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __A ( self: Optional[Any] , __A: Any , __A: str ) -> List[str]:
_A = output.encoder_hidden_states
_A = output.pixel_decoder_hidden_states
_A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_config.decoder_layers )
def __A ( self: Any , __A: Tuple , __A: List[str] , __A: Optional[Any] , __A: Union[str, Any]=False ) -> Union[str, Any]:
with torch.no_grad():
_A = MaskFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
_A = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def __A ( self: List[str] , __A: Any , __A: str , __A: int , __A: str , __A: Dict ) -> int:
_A = MaskFormerForInstanceSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(__A: List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_A = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
_A = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
_A = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def __A ( self: Optional[int] ) -> Optional[Any]:
_A = MaskFormerModelTester(self )
_A = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def __A ( self: str ) -> str:
self.config_tester.run_common_tests()
def __A ( self: Dict ) -> Dict:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def __A ( self: Dict ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def __A ( self: Any ) -> Tuple:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def __A ( self: Any ) -> Tuple:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def __A ( self: Any ) -> Union[str, Any]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def __A ( self: Optional[Any] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __A ( self: Any ) -> str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: int ) -> List[Any]:
pass
def __A ( self: str ) -> Union[str, Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def __A ( self: List[str] ) -> List[str]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_A = MaskFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __A ( self: Tuple ) -> List[Any]:
_A = (self.model_tester.min_size,) * 2
_A = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
_A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCAmelCase )
_A = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def __A ( self: int ) -> List[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def __A ( self: Dict ) -> int:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
_A = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def __A ( self: Optional[int] ) -> Any:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_A = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def __A ( self: Tuple ) -> str:
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = True
_A = True
_A = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_A = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
_A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def __A ( ):
'''simple docstring'''
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self: Dict ) -> int:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __A ( self: Dict ) -> Optional[Any]:
_A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_UpperCAmelCase )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**_UpperCAmelCase )
_A = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
_A = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
_A = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def __A ( self: Any ) -> int:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCAmelCase )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**_UpperCAmelCase )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_A = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def __A ( self: Union[str, Any] ) -> List[str]:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_UpperCAmelCase )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**_UpperCAmelCase )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_A = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def __A ( self: Any ) -> Optional[int]:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_UpperCAmelCase )
.eval()
)
_A = self.default_image_processor
_A = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
_A = inputs['''pixel_values'''].to(_UpperCAmelCase )
_A = [el.to(_UpperCAmelCase ) for el in inputs['''mask_labels''']]
_A = [el.to(_UpperCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_A = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 714 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = 'file'


@pytest.fixture(scope='''session''')
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('''data''') / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT, '''utf-8''')
    with zstd.open(path, '''wb''') as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), '''w''') as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize('''compression_format''', ['''gzip''', '''xz''', '''zstd'''])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize('''default_extracted''', [True, False])
@pytest.mark.parametrize('''default_cache_dir''', [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''', custom_extracted_dir)
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''')
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"""tmp://{tmpfs_file}""")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path('''https://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        http_get('''https://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head('''https://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get('''ftp://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head('''ftp://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get('''s3://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head('''s3://huggingface.co''')
| 62 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ):
'''simple docstring'''
_A = {"""add_prefix_space""": True} if isinstance(__A , __A ) and not line.startswith(''' ''' ) else {}
_A = padding_side
return tokenizer(
[line] , max_length=__A , padding='''max_length''' if pad_to_max_length else None , truncation=__A , return_tensors=__A , add_special_tokens=__A , **__A , )
def __A ( _lowercase , _lowercase , _lowercase=None , ):
'''simple docstring'''
_A = input_ids.ne(__A ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class SCREAMING_SNAKE_CASE ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self: List[Any] , __A: Optional[int] , __A: List[str] , __A: int , __A: Optional[int] , __A: Any="train" , __A: Union[str, Any]=None , __A: int=None , __A: Dict=None , __A: Any="" , ) -> str:
super().__init__()
_A = Path(_UpperCamelCase ).joinpath(type_path + '''.source''' )
_A = Path(_UpperCamelCase ).joinpath(type_path + '''.target''' )
_A = self.get_char_lens(self.src_file )
_A = max_source_length
_A = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
_A = tokenizer
_A = prefix
if n_obs is not None:
_A = self.src_lens[:n_obs]
_A = src_lang
_A = tgt_lang
def __len__( self: List[str] ) -> Union[str, Any]:
return len(self.src_lens )
def __getitem__( self: Optional[int] , __A: str ) -> Dict[str, torch.Tensor]:
_A = index + 1 # linecache starts at 1
_A = self.prefix + linecache.getline(str(self.src_file ) , _UpperCamelCase ).rstrip('''\n''' )
_A = linecache.getline(str(self.tgt_file ) , _UpperCamelCase ).rstrip('''\n''' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _UpperCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_A = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCamelCase ) else self.tokenizer
)
_A = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCamelCase ) else self.tokenizer
_A = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_source_length , '''right''' )
_A = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_target_length , '''right''' )
_A = source_inputs["""input_ids"""].squeeze()
_A = target_inputs["""input_ids"""].squeeze()
_A = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __A ( __A: int ) -> Optional[Any]:
return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()]
def __A ( self: int , __A: int ) -> Dict[str, torch.Tensor]:
_A = torch.stack([x['''input_ids'''] for x in batch] )
_A = torch.stack([x['''attention_mask'''] for x in batch] )
_A = torch.stack([x['''decoder_input_ids'''] for x in batch] )
_A = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase )
else self.tokenizer.pad_token_id
)
_A = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase )
else self.tokenizer.pad_token_id
)
_A = trim_batch(_UpperCamelCase , _UpperCamelCase )
_A = trim_batch(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase )
_A = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__A = getLogger(__name__)
def __A ( _lowercase ):
'''simple docstring'''
return list(itertools.chain.from_iterable(__A ) )
def __A ( _lowercase ):
'''simple docstring'''
_A = get_git_info()
save_json(__A , os.path.join(__A , '''git_log.json''' ) )
def __A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ):
'''simple docstring'''
with open(__A , '''w''' ) as f:
json.dump(__A , __A , indent=__A , **__A )
def __A ( _lowercase ):
'''simple docstring'''
with open(__A ) as f:
return json.load(__A )
def __A ( ):
'''simple docstring'''
_A = git.Repo(search_parent_directories=__A )
_A = {
"""repo_id""": str(__A ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
return list(map(__A , __A ) )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
with open(__A , '''wb''' ) as f:
return pickle.dump(__A , __A )
def __A ( _lowercase ):
'''simple docstring'''
def remove_articles(_lowercase ):
return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , __A )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
_A = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = normalize_answer(__A ).split()
_A = normalize_answer(__A ).split()
_A = Counter(__A ) & Counter(__A )
_A = sum(common.values() )
if num_same == 0:
return 0
_A = 1.0 * num_same / len(__A )
_A = 1.0 * num_same / len(__A )
_A = (2 * precision * recall) / (precision + recall)
return fa
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
return normalize_answer(__A ) == normalize_answer(__A )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
assert len(__A ) == len(__A )
_A = 0
for hypo, pred in zip(__A , __A ):
em += exact_match_score(__A , __A )
if len(__A ) > 0:
em /= len(__A )
return {"em": em}
def __A ( _lowercase ):
'''simple docstring'''
return model_prefix.startswith('''rag''' )
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_A = """dropout_rate"""
for p in extra_params:
if getattr(__A , __A , __A ):
if not hasattr(__A , __A ) and not hasattr(__A , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(__A ) )
delattr(__A , __A )
continue
_A = p if hasattr(__A , __A ) else equivalent_param[p]
setattr(__A , __A , getattr(__A , __A ) )
delattr(__A , __A )
return hparams, config
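The SQuAD-style answer metrics that close this block are easy to sanity-check in isolation. Below is a minimal, runnable sketch with the placeholder assignments resolved; the readable names (`normalize_answer`, `f1_score`) are assumptions, not the corpus's own identifiers.

# Hedged sketch of the answer-normalization and token-level F1 above,
# de-obfuscated under assumed names.
import re
import string
from collections import Counter

def normalize_answer(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def f1_score(prediction, ground_truth):
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

assert f1_score("The cat sat.", "a cat sat") == 1.0  # identical after normalization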
| 715 |
import math
def __A ( _lowercase ):
'''simple docstring'''
_A = []
_A = 2
_A = int(math.sqrt(_lowercase ) ) # Size of every segment
_A = [True] * (end + 1)
_A = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowercase )
for i in range(start * start , end + 1 , _lowercase ):
_A = False
start += 1
prime += in_prime
_A = end + 1
_A = min(2 * end , _lowercase )
while low <= n:
_A = [True] * (high - low + 1)
for each in in_prime:
_A = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowercase , high + 1 , _lowercase ):
_A = False
for j in range(len(_lowercase ) ):
if temp[j] is True:
prime.append(j + low )
_A = high + 1
_A = min(high + end , _lowercase )
return prime
print(sieve(10**6))
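For reference, a runnable rendering of the same segmented sieve with the placeholder assignments resolved (variable names are assumptions): primes up to sqrt(n) are found first, then reused to mark composites one sqrt(n)-sized segment at a time, keeping memory at O(sqrt(n)).

# Hedged, runnable sketch of the segmented sieve above.
import math

def sieve(n):
    end = int(math.sqrt(n))                  # size of every segment
    is_prime = [True] * (end + 1)
    base_primes = []
    for start in range(2, end + 1):
        if is_prime[start]:
            base_primes.append(start)
            for i in range(start * start, end + 1, start):
                is_prime[i] = False
    primes = list(base_primes)
    low, high = end + 1, min(2 * end, n)
    while low <= n:
        segment = [True] * (high - low + 1)
        for p in base_primes:
            first = (low // p) * p           # largest multiple of p <= low
            if first < low:
                first += p
            for j in range(first, high + 1, p):
                segment[j - low] = False
        for offset, flag in enumerate(segment):
            if flag:
                primes.append(low + offset)
        low, high = high + 1, min(high + end, n)
    return primes

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]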
| 62 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[int] , __A: Optional[Any] , __A: str=13 , __A: str=7 , __A: int=True , __A: Optional[Any]=True , __A: int=True , __A: Any=True , __A: Optional[int]=99 , __A: Union[str, Any]=32 , __A: int=5 , __A: List[Any]=4 , __A: Any=37 , __A: List[str]="gelu" , __A: Union[str, Any]=0.1 , __A: List[Any]=0.1 , __A: int=5_12 , __A: Optional[Any]=16 , __A: Any=2 , __A: List[str]=0.02 , __A: str=4 , ) -> Optional[Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_attention_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_choices
def __A ( self: Any ) -> Optional[Any]:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_attention_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __A ( self: List[Any] ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
_A = config_and_inputs
_A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE ( a__ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __A ( self: Any ) -> str:
_A = FlaxAlbertModelTester(self )
@slow
def __A ( self: List[Any] ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
_A = model_class_name.from_pretrained('''albert-base-v2''' )
_A = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self: List[str] ) -> int:
_A = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_A = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_A = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_A = model(_A , attention_mask=_A )[0]
_A = (1, 11, 7_68)
self.assertEqual(output.shape , _A )
_A = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
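Stripped of the test harness, the slow integration check above reduces to a short forward pass (assumes the Flax extras of transformers are installed and `albert-base-v2` is reachable):

# Hedged sketch of the integration check above: forward a short sequence
# through Flax ALBERT and verify the last-hidden-state shape.
import numpy as np
from transformers import FlaxAlbertModel

model = FlaxAlbertModel.from_pretrained("albert-base-v2")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.ones_like(input_ids)
last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
assert last_hidden_state.shape == (1, 11, 768)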
| 716 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: Tuple ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __A: Dict ) -> Tuple:
_A ,_A ,_A ,_A = hidden_states.shape
_A = jax.image.resize(
__A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: List[str] ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = None
A_ = 0.0
A_ = None
A_ = jnp.floataa
def __A ( self: Dict ) -> Dict:
_A = self.in_channels if self.out_channels is None else self.out_channels
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = nn.Dense(__A , dtype=self.dtype )
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Dropout(self.dropout_prob )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_A = None
if use_nin_shortcut:
_A = nn.Conv(
__A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]:
_A = hidden_states
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.conva(__A )
_A = self.time_emb_proj(nn.swish(__A ) )
_A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 )
_A = hidden_states + temb
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.dropout(__A , __A )
_A = self.conva(__A )
if self.conv_shortcut is not None:
_A = self.conv_shortcut(__A )
return hidden_states + residual
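The upsampling module above leans entirely on `jax.image.resize` for its 2x nearest-neighbour step before the convolution; in isolation that step looks like this:

# Hedged sketch of the 2x nearest-neighbour upsample used by the first
# module above (NHWC layout, as flax.linen.Conv expects).
import jax
import jax.numpy as jnp

hidden_states = jnp.ones((1, 8, 8, 4))
batch, height, width, channels = hidden_states.shape
upsampled = jax.image.resize(
    hidden_states,
    shape=(batch, height * 2, width * 2, channels),
    method="nearest",
)
assert upsampled.shape == (1, 16, 16, 4)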
| 62 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( _A , _A , _A , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInstructPixaPixPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self: int ) -> List[Any]:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_A = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_A = CLIPTextModel(__lowerCamelCase )
_A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self: Tuple , __A: Union[str, Any] , __A: Union[str, Any]=0 ) -> Optional[Any]:
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' )
if str(__lowerCamelCase ).startswith('''mps''' ):
_A = torch.manual_seed(__lowerCamelCase )
else:
_A = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def __A ( self: Optional[int] ) -> List[str]:
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase )
_A = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_A = self.get_dummy_inputs(__lowerCamelCase )
_A = sd_pipe(**__lowerCamelCase ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __A ( self: List[str] ) -> Optional[Any]:
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase )
_A = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_A = self.get_dummy_inputs(__lowerCamelCase )
_A = "french fries"
_A = sd_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase )
_A = output.images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __A ( self: Dict ) -> int:
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase )
_A = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_A = self.get_dummy_inputs(__lowerCamelCase )
_A = [inputs["prompt"]] * 2
_A = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
_A = torch.from_numpy(__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase )
_A = image / 2 + 0.5
_A = image.permute(0 , 3 , 1 , 2 )
_A = image.repeat(2 , 1 , 1 , 1 )
_A = sd_pipe(**__lowerCamelCase ).images
_A = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_A = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __A ( self: str ) -> List[Any]:
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
_A = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase )
_A = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_A = self.get_dummy_inputs(__lowerCamelCase )
_A = sd_pipe(**__lowerCamelCase ).images
_A = image[0, -3:, -3:, -1]
_A = [round(__lowerCamelCase , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(__lowerCamelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_A = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __A ( self: Optional[int] ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __A ( self: int ) -> int:
_A = self.get_dummy_components()
_A = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase )
_A = VaeImageProcessor(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase )
_A = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_A = pipe(**self.get_dummy_inputs_by_type(__lowerCamelCase , input_image_type='''pt''' ) )[0]
_A = components["vae"]
_A = self.get_dummy_inputs_by_type(__lowerCamelCase , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_A = vae.encode(inputs[image_param] ).latent_dist.mode()
_A = pipe(**__lowerCamelCase )[0]
_A = np.abs(out - out_latents_inputs ).max()
self.assertLess(__lowerCamelCase , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Optional[int] ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self: List[Any] , __A: str=0 ) -> List[str]:
_A = torch.manual_seed(__lowerCamelCase )
_A = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
_A = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def __A ( self: str ) -> Any:
_A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_A = self.get_inputs()
_A = pipe(**__lowerCamelCase ).images
_A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __A ( self: Dict ) -> Tuple:
_A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCamelCase )
_A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_A = self.get_inputs()
_A = pipe(**__lowerCamelCase ).images
_A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __A ( self: Tuple ) -> List[str]:
_A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCamelCase )
_A = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_A = self.get_inputs()
_A = pipe(**__lowerCamelCase ).images
_A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __A ( self: List[Any] ) -> Optional[int]:
_A = 0
def callback_fn(__A: int , __A: int , __A: torch.FloatTensor ) -> None:
_A = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_A = latents[0, -3:, -3:, -1]
_A = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_A = latents[0, -3:, -3:, -1]
_A = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_A = False
_A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa )
_A = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_A = self.get_inputs()
pipe(**__lowerCamelCase , callback=__lowerCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __A ( self: Any ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa )
_A = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = self.get_inputs()
_A = pipe(**__lowerCamelCase )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __A ( self: int ) -> Optional[Any]:
_A = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_A = inputs["image"].resize((5_04, 5_04) )
_A = "timbrooks/instruct-pix2pix"
_A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
_A = pipe(**__lowerCamelCase )
_A = output.images[0]
_A = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
_A = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
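Outside the test harness, the slow checks above all exercise one usage pattern; a minimal sketch of it (assumes diffusers, a CUDA device, and network access; step count kept low as in the tests):

# Hedged sketch of the pipeline usage the slow tests above exercise.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
result = pipe(
    "turn him into a cyborg",
    image=image,
    num_inference_steps=3,
    guidance_scale=7.5,
    image_guidance_scale=1.0,
).images[0]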
| 717 |
def __A ( _lowercase ):
'''simple docstring'''
_A = [0] * len(_lowercase )
_A = []
_A = []
_A = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_lowercase ) ):
if indegree[i] == 0:
queue.append(_lowercase )
while queue:
_A = queue.pop(0 )
cnt += 1
topo.append(_lowercase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_lowercase )
if cnt != len(_lowercase ):
print('''Cycle exists''' )
else:
print(_lowercase )
# Adjacency List of Graph
__A = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
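A runnable rendering of the same Kahn's-algorithm traversal, with the placeholder assignments resolved and the result returned instead of printed (names are assumptions):

# Hedged sketch of Kahn's algorithm above: repeatedly pop zero-indegree
# vertices; if not every vertex gets emitted, the graph contains a cycle.
from collections import deque

def topological_sort(graph):
    indegree = [0] * len(graph)
    for targets in graph.values():
        for v in targets:
            indegree[v] += 1
    queue = deque(v for v in range(len(graph)) if indegree[v] == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for nxt in graph[vertex]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    return order if len(order) == len(graph) else None  # None signals a cycle

assert topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) == [0, 1, 2, 3, 4, 5]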
| 62 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self: List[str] , __A: Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
super().__init__()
_A = nn.ModuleList(lowerCamelCase_ )
def __A ( self: Any , __A: torch.FloatTensor , __A: Union[torch.Tensor, float, int] , __A: torch.Tensor , __A: List[torch.tensor] , __A: List[float] , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[torch.Tensor] = None , __A: Optional[Dict[str, Any]] = None , __A: bool = False , __A: bool = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
_A = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
_A = down_samples, mid_sample
else:
_A = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __A ( self: Any , __A: Union[str, os.PathLike] , __A: bool = True , __A: Callable = None , __A: bool = False , __A: Optional[str] = None , ) -> Optional[int]:
_A = 0
_A = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
_A = model_path_to_save + f"""_{idx}"""
@classmethod
def __A ( cls: Dict , __A: Optional[Union[str, os.PathLike]] , **__A: Tuple ) -> List[Any]:
_A = 0
_A = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_A = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
_A = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
_A = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(lowerCamelCase_ )
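The class above mirrors diffusers' `MultiControlNetModel`, which a pipeline constructs implicitly when handed a list of ControlNets. A sketch of that composition (the checkpoints are illustrative public ones; downloading them is assumed to work):

# Hedged sketch: two ControlNets composed the way the module above supports.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=[canny, pose],   # the list is wrapped into one multi-ControlNet module
    torch_dtype=torch.float16,
)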
| 718 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class SCREAMING_SNAKE_CASE ( snake_case , snake_case ):
"""simple docstring"""
A_ = 1
@register_to_config
def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__A )
# standard deviation of the initial noise distribution
_A = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_A = 4
# running values
_A = []
def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int:
_A = num_inference_steps
_A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_A = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_A = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
_A = torch.sin(steps * math.pi / 2 ) ** 2
_A = (1.0 - self.betas**2) ** 0.5
_A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
_A = timesteps.to(__A )
_A = []
def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
_A = (self.timesteps == timestep).nonzero().item()
_A = timestep_index + 1
_A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__A )
if len(self.ets ) == 1:
_A = self.ets[-1]
elif len(self.ets ) == 2:
_A = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_A = self._get_prev_sample(__A , __A , __A , __A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__A )
def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor:
return sample
def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]:
_A = self.alphas[timestep_index]
_A = self.betas[timestep_index]
_A = self.alphas[prev_timestep_index]
_A = self.betas[prev_timestep_index]
_A = (sample - sigma * ets) / max(__A , 1e-8 )
_A = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[str] ) -> Dict:
return self.config.num_train_timesteps
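The scheduler above matches diffusers' `IPNDMScheduler`. A minimal sampling loop built around its `set_timesteps`/`step` contract (the denoiser below is a stand-in, not a real model):

# Hedged sketch of the set_timesteps()/step() loop the class above expects.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma

def denoise(x, t):   # stand-in for a real epsilon-predicting UNet
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    sample = scheduler.step(denoise(sample, t), t, sample).prev_sample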
| 62 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def __A ( _lowercase , _lowercase , _lowercase = 1_60_00 ):
'''simple docstring'''
_A = int(round(sample_rate * max_length ) )
if len(_A ) <= sample_length:
return wav
_A = randint(0 , len(_A ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
A_ = field(default=_UpperCAmelCase , metadata={"help": "Name of a dataset from the datasets package"} )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "A file containing the training audio paths and labels."} )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "A file containing the validation audio paths and labels."} )
A_ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to \'train\'"
} , )
A_ = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to \'validation\'"
)
} , )
A_ = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to \'audio\'"} , )
A_ = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to \'label\'"} )
A_ = field(
default=_UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A_ = field(
default=_UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
A_ = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
A_ = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
A_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "Name or path of preprocessor config."} )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
A_ = field(
default=_UpperCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
A_ = field(
default=_UpperCAmelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __A ( self: Dict ) -> int:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def __A ( ):
'''simple docstring'''
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A ,_A ,_A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A ,_A ,_A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , _A , _A )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A = training_args.get_process_log_level()
logger.setLevel(_A )
transformers.utils.logging.set_verbosity(_A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
_A = DatasetDict()
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_A = feature_extractor.model_input_names[0]
def train_transforms(_lowercase ):
_A = []
for audio in batch[data_args.audio_column_name]:
_A = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_A )
_A = feature_extractor(_A , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(_A )}
_A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_lowercase ):
_A = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
_A = feature_extractor(_A , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(_A )}
_A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_A = raw_datasets['''train'''].features[data_args.label_column_name].names
_A ,_A = {}, {}
for i, label in enumerate(_A ):
_A = str(_A )
_A = label
# Load the accuracy metric from the datasets package
_A = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(_lowercase ):
_A = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_A , references=eval_pred.label_ids )
_A = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_A ) , labelaid=_A , idalabel=_A , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_A = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_A , output_all_columns=_A )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_A = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_A , output_all_columns=_A )
# Initialize our trainer
_A = Trainer(
model=_A , args=_A , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=_A , tokenizer=_A , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=_A )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_A = trainer.evaluate()
trainer.log_metrics('''eval''' , _A )
trainer.save_metrics('''eval''' , _A )
# Write model card and (optionally) push to hub
_A = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_A )
else:
trainer.create_model_card(**_A )
if __name__ == "__main__":
main()
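One self-contained piece of the script worth illustrating is the training-time subsampling: each waveform is cropped to a fixed number of samples at a random offset. A standalone sketch (mirrors `random_subsample` above):

# Hedged, standalone sketch of random_subsample() above.
import numpy as np
from random import randint

def random_subsample(wav, max_length, sample_rate=16000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]

wav = np.zeros(5 * 16000, dtype=np.float32)   # five seconds of silent fake audio
clip = random_subsample(wav, max_length=1.0)  # random one-second crop
assert clip.shape == (16000,)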
| 719 |
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A ,_A = len(_lowercase ), len(grid[0] )
if (
min(_lowercase , _lowercase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
_A = 0
count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase )
count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase )
count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase )
count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
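A runnable rendering of the same backtracking counter with readable names (the defaulted `visit` seed is an assumption added for convenience): it counts simple paths from (0, 0) to the bottom-right cell, treating 1-cells as blocked.

# Hedged sketch of the depth-first path counter above.
def count_paths(grid, row=0, col=0, visit=None):
    visit = set() if visit is None else visit
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = (
        count_paths(grid, row + 1, col, visit)
        + count_paths(grid, row - 1, col, visit)
        + count_paths(grid, row, col + 1, visit)
        + count_paths(grid, row, col - 1, visit)
    )
    visit.remove((row, col))
    return count

assert count_paths([[0, 0], [0, 0]]) == 2   # right-then-down and down-then-right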
| 62 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self: List[Any] , __A: Any , __A: List[Any] , __A: Union[str, Any] ) -> Union[str, Any]:
_A = dataset
_A = process
_A = params
def __len__( self: Dict ) -> Any:
return len(self.dataset )
def __getitem__( self: Tuple , __A: Optional[Any] ) -> List[Any]:
_A = self.dataset[i]
_A = self.process(snake_case_ , **self.params )
return processed
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self: Optional[Any] , __A: List[Any] , __A: Dict , __A: List[Any] , __A: Tuple=None ) -> Dict:
_A = loader
_A = infer
_A = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_A = None
_A = loader_batch_size
# Internal bookkeeping
_A = None
_A = None
def __len__( self: Union[str, Any] ) -> Tuple:
return len(self.loader )
def __iter__( self: Any ) -> Optional[Any]:
_A = iter(self.loader )
return self
def __A ( self: List[Any] ) -> Optional[int]:
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_A = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_A = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case_ , snake_case_ ):
# Convert ModelOutput to tuple first
_A = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_A = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_A = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_A = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_A = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_A = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_A = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_A = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_A = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_A = self._loader_batch_data.__class__(snake_case_ )
self._loader_batch_index += 1
return result
def __A ( self: int ) -> List[Any]:
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_A = next(self.iterator )
_A = self.infer(snake_case_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case_ , torch.Tensor ):
_A = processed
else:
_A = list(processed.keys() )[0]
_A = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_A = len(snake_case_ )
else:
_A = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_A = observed_batch_size
# Setting internal index to unwrap the batch
_A = processed
_A = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self: Any , __A: Tuple , __A: int , __A: Dict , __A: Union[str, Any]=None ) -> int:
super().__init__(snake_case_ , snake_case_ , snake_case_ )
def __iter__( self: Tuple ) -> Union[str, Any]:
_A = iter(self.loader )
_A = None
return self
def __A ( self: Optional[int] ) -> Any:
if self.subiterator is None:
_A = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_A = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
            #
            # Another way to look at it: we're basically flattening lists of
            # lists into a single list, but with generators.
_A = self.infer(next(self.iterator ) , **self.params )
_A = next(self.subiterator )
return processed
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __iter__( self: Optional[Any] ) -> str:
_A = iter(self.loader )
return self
def __A ( self: str ) -> Tuple:
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
_A = False
_A = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_A = self.loader_batch_item()
_A = item.pop('''is_last''' )
accumulator.append(snake_case_ )
if is_last:
return accumulator
while not is_last:
_A = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(snake_case_ , torch.Tensor ):
_A = processed
else:
_A = list(processed.keys() )[0]
_A = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_A = len(snake_case_ )
else:
_A = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_A = observed_batch_size
_A = processed
_A = 0
while self._loader_batch_index < self.loader_batch_size:
_A = self.loader_batch_item()
_A = item.pop('''is_last''' )
accumulator.append(snake_case_ )
if is_last:
return accumulator
else:
_A = processed
_A = item.pop('''is_last''' )
accumulator.append(snake_case_ )
return accumulator
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self: str , __A: Any , __A: Union[str, Any] ) -> Any:
_A = dataset
_A = key
def __len__( self: Tuple ) -> int:
return len(self.dataset )
def __getitem__( self: Tuple , __A: Tuple ) -> Dict:
return self.dataset[i][self.key]
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: int ) -> Optional[int]:
_A = dataset
_A = keya
_A = keya
def __len__( self: Optional[int] ) -> List[str]:
return len(self.dataset )
def __getitem__( self: Tuple , __A: Dict ) -> Any:
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 720 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__A = NewType('DataClass', Any)
__A = NewType('DataClassType', Any)
def __A ( _lowercase ):
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __A ( _lowercase ):
'''simple docstring'''
_A = {str(_lowercase ): choice for choice in choices}
return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase )
def __A ( *,
_lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_A = {}
if aliases is not None:
_A = aliases
if help is not None:
_A = help
return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = 42
def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_A = ArgumentDefaultsHelpFormatter
super().__init__(**__A )
if dataclasses.is_dataclass(__A ):
_A = [dataclass_types]
_A = list(__A )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__A )
@staticmethod
def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str:
_A = f"""--{field.name}"""
_A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __A ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
_A = kwargs.pop('''aliases''' , [] )
if isinstance(__A , __A ):
_A = [aliases]
_A = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__A ) not in field.type.__args__:
# filter `str` in Union
_A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_A = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_A = (
field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1]
)
_A = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_A = {}
if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )):
if origin_type is Literal:
_A = field.type.__args__
else:
_A = [x.value for x in field.type]
_A = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
_A = field.default
else:
_A = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs so we can instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_A = copy(__A )
# Hack because type=bool in argparse does not behave as we want.
_A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_A = default
# This tells argparse we accept 0 or 1 value after --field_name
_A = '''?'''
# This is the value that will get picked if we do --field_name (without value)
_A = True
elif isclass(__A ) and issubclass(__A , __A ):
_A = field.type.__args__[0]
_A = '''+'''
if field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
elif field.default is dataclasses.MISSING:
_A = True
else:
_A = field.type
if field.default is not dataclasses.MISSING:
_A = field.default
elif field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
else:
_A = True
parser.add_argument(__A , *__A , **__A )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_A = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A )
def __A ( self: Dict , __A: DataClassType ) -> List[Any]:
if hasattr(__A , '''_argument_group_name''' ):
_A = self.add_argument_group(dtype._argument_group_name )
else:
_A = self
try:
_A = get_type_hints(__A )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                '''removing the line `from __future__ import annotations`, which opts in to Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ):
_A = '''.'''.join(map(__A , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    '''the line `from __future__ import annotations`, which opts in to union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__A ):
if not field.init:
continue
_A = type_hints[field.name]
self._parse_dataclass_field(__A , __A )
def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_A = []
if args_filename:
args_files.append(Path(__A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_A = ArgumentParser()
args_file_parser.add_argument(__A , type=__A , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
_A ,_A = args_file_parser.parse_known_args(args=__A )
_A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A )
if cmd_args_file_paths:
args_files.extend([Path(__A ) for p in cmd_args_file_paths] )
_A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_A = file_args + args if args is not None else file_args + sys.argv[1:]
_A ,_A = self.parse_known_args(args=__A )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in vars(__A ).items() if k in keys}
for k in keys:
delattr(__A , __A )
_A = dtype(**__A )
outputs.append(__A )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = set(args.keys() )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_A = dtype(**__A )
outputs.append(__A )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" )
return tuple(__A )
def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file:
_A = json.loads(open_json_file.read() )
_A = self.parse_dict(__A , allow_extra_keys=__A )
return tuple(__A )
def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A )
return tuple(__A )
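# Illustrative usage sketch (added for clarity; not part of the original file). The
# dataclass and flag values are hypothetical; the method names follow the upstream
# HfArgumentParser API that this class mirrors:
#
#   @dataclass
#   class TrainingArgs:
#       learning_rate: float = 5e-5
#       do_train: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(
#       args=['--learning_rate', '3e-5', '--do_train'] )
#   # boolean fields that default to True also get a generated `--no_<field>` flag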
| 62 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
# TODO Update this
__A = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
A_ = "esm"
def __init__( self: Optional[int] , __A: List[str]=None , __A: str=None , __A: Union[str, Any]=None , __A: str=7_68 , __A: Any=12 , __A: Tuple=12 , __A: Optional[int]=30_72 , __A: Tuple=0.1 , __A: Optional[int]=0.1 , __A: Union[str, Any]=10_26 , __A: Optional[Any]=0.02 , __A: Dict=1e-12 , __A: int="absolute" , __A: str=True , __A: Dict=None , __A: Union[str, Any]=False , __A: Tuple=False , __A: Optional[int]=None , __A: Any=None , **__A: Dict , ) -> str:
super().__init__(pad_token_id=_lowercase , mask_token_id=_lowercase , **_lowercase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = emb_layer_norm_before
_A = token_dropout
_A = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
_A = EsmFoldConfig()
elif isinstance(_lowercase , _lowercase ):
_A = EsmFoldConfig(**_lowercase )
_A = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
_A = get_default_vocab_list()
else:
_A = vocab_list
else:
_A = None
_A = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , _lowercase ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self: Union[str, Any] ) -> Any:
_A = super().to_dict()
if isinstance(self.esmfold_config , _lowercase ):
_A = self.esmfold_config.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
A_ = None
A_ = True
A_ = False
A_ = False
A_ = False
A_ = 0
A_ = True
A_ = False
A_ = 128
A_ = None
def __A ( self: Optional[int] ) -> List[Any]:
if self.trunk is None:
_A = TrunkConfig()
elif isinstance(self.trunk , _lowercase ):
_A = TrunkConfig(**self.trunk )
def __A ( self: Optional[Any] ) -> Union[str, Any]:
_A = asdict(self )
_A = self.trunk.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
A_ = 48
A_ = 1_024
A_ = 128
A_ = 32
A_ = 32
A_ = 32
A_ = 0
A_ = 0
A_ = False
A_ = 4
A_ = 128
A_ = None
def __A ( self: Dict ) -> Dict:
if self.structure_module is None:
_A = StructureModuleConfig()
elif isinstance(self.structure_module , _lowercase ):
_A = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_A = self.sequence_state_dim // self.sequence_head_width
_A = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self: Any ) -> Dict:
_A = asdict(self )
_A = self.structure_module.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
A_ = 384
A_ = 128
A_ = 16
A_ = 128
A_ = 12
A_ = 4
A_ = 8
A_ = 0.1
A_ = 8
A_ = 1
A_ = 2
A_ = 7
A_ = 10
A_ = 1e-8
A_ = 1e5
def __A ( self: List[Any] ) -> List[Any]:
return asdict(self )
def __A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 721 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str:
_A = parent
_A = batch_size
_A = is_training
_A = use_auxiliary_loss
_A = num_queries
_A = num_channels
_A = min_size
_A = max_size
_A = num_labels
_A = mask_feature_size
def __A ( self: Dict ) -> Optional[int]:
_A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__A )
_A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A )
_A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5
).float()
_A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long()
_A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __A ( self: Optional[Any] ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __A ( self: Dict ) -> Tuple:
_A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs()
_A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int:
_A = output.encoder_hidden_states
_A = output.pixel_decoder_hidden_states
_A = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(__A ) , config.decoder_config.decoder_layers )
def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any:
with torch.no_grad():
_A = MaskFormerModel(config=__A )
model.to(__A )
model.eval()
_A = model(pixel_values=__A , pixel_mask=__A )
_A = model(__A , output_hidden_states=__A )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__A , __A )
def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int:
_A = MaskFormerForInstanceSegmentation(config=__A )
model.to(__A )
model.eval()
def comm_check_on_output(__A: int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_A = model(pixel_values=__A , pixel_mask=__A )
_A = model(__A )
comm_check_on_output(__A )
_A = model(
pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A )
comm_check_on_output(__A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
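# Worked shape example for the assertions above, using this tester's defaults
# (batch_size=2, num_queries=10, min_size=128, max_size=192, num_labels=4):
# masks_queries_logits is (2, 10, 128 // 4, 192 // 4) = (2, 10, 32, 48) and
# class_queries_logits is (2, 10, 4 + 1), the extra slot being the null ("no object") class.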
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def __A ( self: int ) -> Tuple:
_A = MaskFormerModelTester(self )
_A = ConfigTester(self , config_class=__A , has_text_modality=__A )
def __A ( self: List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def __A ( self: Optional[Any] ) -> int:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def __A ( self: Dict ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def __A ( self: int ) -> Tuple:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def __A ( self: List[Any] ) -> Any:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def __A ( self: Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def __A ( self: int ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __A ( self: Union[str, Any] ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: List[Any] ) -> Any:
pass
def __A ( self: Dict ) -> Optional[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
@slow
def __A ( self: int ) -> Optional[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_A = MaskFormerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __A ( self: Optional[Any] ) -> Optional[int]:
_A = (self.model_tester.min_size,) * 2
_A = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__A ),
'''mask_labels''': torch.randn((2, 10, *size) , device=__A ),
'''class_labels''': torch.zeros(2 , 10 , device=__A ).long(),
}
_A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A )
_A = model(**__A )
self.assertTrue(outputs.loss is not None )
def __A ( self: Optional[Any] ) -> List[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def __A ( self: Any ) -> Tuple:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A ).to(__A )
_A = model(**__A , output_attentions=__A )
self.assertTrue(outputs.attentions is not None )
def __A ( self: Dict ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = model_class(__A )
model.to(__A )
model.train()
_A = model(__A , mask_labels=__A , class_labels=__A ).loss
loss.backward()
def __A ( self: Tuple ) -> Optional[Any]:
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = True
_A = True
_A = model_class(__A )
model.to(__A )
model.train()
_A = model(__A , mask_labels=__A , class_labels=__A )
_A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def __A ( ):
'''simple docstring'''
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self: Union[str, Any] ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __A ( self: List[Any] ) -> Any:
_A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
_A = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
_A = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
_A = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) )
def __A ( self: Dict ) -> Dict:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_A = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def __A ( self: List[Any] ) -> Dict:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_A = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def __A ( self: Optional[Any] ) -> str:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
_A = inputs['''pixel_values'''].to(__A )
_A = [el.to(__A ) for el in inputs['''mask_labels''']]
_A = [el.to(__A ) for el in inputs['''class_labels''']]
with torch.no_grad():
_A = model(**__A )
self.assertTrue(outputs.loss is not None )
| 62 | 0 |
import baseaa
def baseaa_encode( _lowercase ):
'''simple docstring'''
return baseaa.baaencode(_lowercase.encode('''utf-8''' ) )
def baseaa_decode( _lowercase ):
'''simple docstring'''
return baseaa.baadecode(_lowercase ).decode('''utf-8''' )
if __name__ == "__main__":
__A = 'Hello World!'
__A = baseaa_encode(test)
print(encoded)
__A = baseaa_decode(encoded)
print(decoded)
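# Expected output of the demo above (base64 round trip): encoding 'Hello World!'
# yields b'SGVsbG8gV29ybGQh', and decoding recovers 'Hello World!'.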
| 700 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str:
_A = question_encoder
_A = generator
_A = self.question_encoder
def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict:
if os.path.isfile(__A ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__A , exist_ok=__A )
_A = os.path.join(__A , '''question_encoder_tokenizer''' )
_A = os.path.join(__A , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__A )
self.generator.save_pretrained(__A )
@classmethod
def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_A = kwargs.pop('''config''' , __A )
if config is None:
_A = RagConfig.from_pretrained(__A )
_A = AutoTokenizer.from_pretrained(
__A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
_A = AutoTokenizer.from_pretrained(
__A , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__A , generator=__A )
def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int:
return self.current_tokenizer(*__A , **__A )
def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict:
return self.generator.batch_decode(*__A , **__A )
def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple:
return self.generator.decode(*__A , **__A )
def __A ( self: Dict ) -> List[str]:
_A = self.question_encoder
def __A ( self: Union[str, Any] ) -> int:
_A = self.generator
def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding:
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __A , )
if max_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
__A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , )
_A = labels['''input_ids''']
return model_inputs
| 62 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 701 |
from __future__ import annotations
def binary_search( v: list[int] , key: int , l: int , r: int ) -> int: # noqa: E741
'''simple docstring'''
while r - l > 1:
m = (l + r) // 2
if v[m] >= key:
r = m
else:
l = m # noqa: E741
return r
def longest_increasing_subsequence_length( v: list[int] ) -> int:
'''simple docstring'''
if len(v ) == 0:
return 0
tail = [0] * len(v )
length = 1
tail[0] = v[0]
for i in range(1 , len(v ) ):
if v[i] < tail[0]:
# new smallest value becomes the head of every candidate subsequence
tail[0] = v[i]
elif v[i] > tail[length - 1]:
# v[i] extends the longest subsequence found so far
tail[length] = v[i]
length += 1
else:
# v[i] replaces the smallest tail element that is >= v[i]
tail[binary_search(tail , v[i] , -1 , length - 1 )] = v[i]
return length
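# Example (illustrative): longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
# returns 6, matching the increasing subsequence [2, 3, 7, 8, 10, 13].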
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE ( snake_case , snake_case ):
"""simple docstring"""
A_ = "swin"
A_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: str , __A: Dict=2_24 , __A: Optional[Any]=4 , __A: Dict=3 , __A: List[str]=96 , __A: Union[str, Any]=[2, 2, 6, 2] , __A: Dict=[3, 6, 12, 24] , __A: Dict=7 , __A: Any=4.0 , __A: Any=True , __A: Dict=0.0 , __A: Optional[int]=0.0 , __A: Any=0.1 , __A: Dict="gelu" , __A: Optional[int]=False , __A: Optional[Any]=0.02 , __A: str=1e-5 , __A: Dict=32 , __A: Optional[int]=None , __A: Any=None , **__A: Optional[Any] , ) -> List[Any]:
super().__init__(**__A )
_A = image_size
_A = patch_size
_A = num_channels
_A = embed_dim
_A = depths
_A = len(__A )
_A = num_heads
_A = window_size
_A = mlp_ratio
_A = qkv_bias
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = drop_path_rate
_A = hidden_act
_A = use_absolute_embeddings
_A = layer_norm_eps
_A = initializer_range
_A = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_A = int(embed_dim * 2 ** (len(__A ) - 1) )
_A = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(__A ) + 1 )]
_A ,_A = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
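# Worked example of the derived channel dimension above: with the defaults
# embed_dim=96 and depths=[2, 2, 6, 2] (four stages), hidden_size is
# 96 * 2 ** (4 - 1) = 768, the channel count after the last Swin stage.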
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = version.parse("1.11" )
@property
def __A ( self: Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __A ( self: Dict ) -> float:
return 1e-4
| 702 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "sequence-classification"
def __init__( self: str , __A: Union[str, Any] ) -> List[str]:
if type(__A ) == dict:
_A = Namespace(**__A )
_A = glue_output_modes[hparams.task]
_A = glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]:
return self.model(**__A )
def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]:
_A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_A = self(**__A )
_A = outputs[0]
_A = self.trainer.lr_schedulers[0]['''scheduler''']
_A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __A ( self: List[str] ) -> Dict:
_A = self.hparams
_A = processors[args.task]()
_A = processor.get_labels()
for mode in ["train", "dev"]:
_A = self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
_A = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
_A = convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __A )
torch.save(__A , __A )
def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader:
_A = '''dev''' if mode == '''test''' else mode
_A = self._feature_file(__A )
logger.info('''Loading features from cached file %s''' , __A )
_A = torch.load(__A )
_A = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_A = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_A = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def __A ( self: List[str] , __A: str , __A: Tuple ) -> str:
_A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_A = self(**__A )
_A ,_A = outputs[:2]
_A = logits.detach().cpu().numpy()
_A = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __A ( self: str , __A: Dict ) -> tuple:
_A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
_A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_A = np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_A = np.squeeze(__A )
_A = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
_A = [[] for _ in range(out_label_ids.shape[0] )]
_A = [[] for _ in range(out_label_ids.shape[0] )]
_A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_A = dict(results.items() )
_A = results
return ret, preds_list, out_label_list
def __A ( self: Any , __A: list ) -> dict:
_A ,_A ,_A = self._eval_end(__A )
_A = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __A ( self: int , __A: Union[str, Any] ) -> dict:
_A ,_A ,_A = self._eval_end(__A )
_A = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=__A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this; it is 0 by default, meaning none.''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def __A ( ):
'''simple docstring'''
_A = argparse.ArgumentParser()
add_generic_args(_lowercase , os.getcwd() )
_A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() )
_A = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_A = os.path.join(
'''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
os.makedirs(args.output_dir )
_A = GLUETransformer(_lowercase )
_A = generic_train(_lowercase , _lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) )
_A = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_lowercase )
if __name__ == "__main__":
main()
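# Example invocation (hypothetical paths; `--task` and `--gpus` are defined above,
# while the remaining flags come from add_generic_args / BaseTransformer in lightning_base):
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --max_seq_length 128 --gpus 1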
| 62 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
__A = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase_ )} , )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "The input training data file (a text file)."} )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
lowerCAmelCase_ = field(default=UpperCamelCase_ , metadata={"help": "Whether ot not to use whole word mask."} )
lowerCAmelCase_ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
lowerCAmelCase_ = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
lowerCAmelCase_ = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
lowerCAmelCase_ = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
lowerCAmelCase_ = field(
default=UpperCamelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __A ( _lowercase , _lowercase , _lowercase = False , _lowercase = None , ):
'''simple docstring'''
def _dataset(_lowercase , _lowercase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size , ref_path=snake_case_ , )
return LineByLineTextDataset(tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=snake_case_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(snake_case_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
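# Illustration of the selection logic above (hypothetical flag values): passing
# --train_data_files 'shards/*.txt' builds a ConcatDataset over every matching shard,
# while --train_data_file corpus.txt with --line_by_line treats each line as one example.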
def __A ( ):
'''simple docstring'''
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_A = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_A = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_A = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_A = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
_A = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_A = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
_A = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
_A = AutoModelWithLMHead.from_config(snake_case_ )
model.resize_token_embeddings(len(snake_case_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the '''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
_A = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_A = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_A = (
get_dataset(snake_case_ , tokenizer=snake_case_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_A = (
get_dataset(snake_case_ , tokenizer=snake_case_ , evaluate=snake_case_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_A = DataCollatorForPermutationLanguageModeling(
tokenizer=snake_case_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_A = DataCollatorForWholeWordMask(
tokenizer=snake_case_ , mlm_probability=data_args.mlm_probability )
else:
_A = DataCollatorForLanguageModeling(
tokenizer=snake_case_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_A = Trainer(
model=snake_case_ , args=snake_case_ , data_collator=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , prediction_loss_only=snake_case_ , )
# Training
if training_args.do_train:
_A = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=snake_case_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_A = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_A = trainer.evaluate()
_A = math.exp(eval_output['''eval_loss'''] )
_A = {"perplexity": perplexity}
_A = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(snake_case_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , snake_case_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(snake_case_ )
return results
def __A ( _lowercase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 703 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __A ( _lowercase = "" ):
'''simple docstring'''
_A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
_A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' )
_A = soup.find_all('''td''' , attrs='''titleColumn''' )
_A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_lowercase , _lowercase )
}
def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
_A = get_imdb_top_aaa_movies()
with open(_lowercase , '''w''' , newline='''''' ) as out_file:
_A = csv.writer(_lowercase )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
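# Note: the scraper assumes IMDb's legacy chart markup (td.titleColumn /
# td.ratingColumn imdbRating). The resulting CSV has the header row
# `Movie title,IMDb rating` followed by one `title,rating` row per movie.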
if __name__ == "__main__":
write_movies()
| 62 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__A = logging.get_logger(__name__)
__A = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A_ = "longformer"
def __init__( self: List[str] , __A: Union[List[int], int] = 5_12 , __A: int = 2 , __A: int = 1 , __A: int = 0 , __A: int = 2 , __A: int = 3_05_22 , __A: int = 7_68 , __A: int = 12 , __A: int = 12 , __A: int = 30_72 , __A: str = "gelu" , __A: float = 0.1 , __A: float = 0.1 , __A: int = 5_12 , __A: int = 2 , __A: float = 0.02 , __A: float = 1e-12 , __A: bool = False , **__A: Dict , ) -> Tuple:
super().__init__(pad_token_id=__A , **__A )
_A = attention_window
_A = sep_token_id
_A = bos_token_id
_A = eos_token_id
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = onnx_export
class SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
def __init__( self: int , __A: "PretrainedConfig" , __A: str = "default" , __A: "List[PatchingSpec]" = None ) -> str:
super().__init__(__A , __A , __A )
_A = True
@property
def __A ( self: int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def __A ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
_A = super().outputs
if self.task == "default":
_A = {0: "batch"}
return outputs
@property
def __A ( self: str ) -> float:
return 1e-4
@property
def __A ( self: Any ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def __A ( self: str , __A: "PreTrainedTokenizerBase" , __A: int = -1 , __A: int = -1 , __A: bool = False , __A: Optional[TensorType] = None , ) -> Mapping[str, Any]:
inputs = super().generate_dummy_inputs(
preprocessor=__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs['''global_attention_mask'''] = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
inputs['''global_attention_mask'''][:, ::2] = 1
return inputs
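# Note: marking every second position as global here only exercises the ONNX export
# path; real Longformer usage typically sets global attention on a few task-specific
# tokens (e.g. the <s> token for classification).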
| 704 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = BlenderbotSmallTokenizer
A_ = False
def __A ( self: List[str] ) -> int:
super().setUp()
_A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__A ) )
def __A ( self: str , **__A: Optional[Any] ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: str , __A: List[str] ) -> int:
_A = '''adapt act apte'''
_A = '''adapt act apte'''
return input_text, output_text
def __A ( self: Union[str, Any] ) -> Any:
_A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A = '''adapt act apte'''
_A = ['''adapt''', '''act''', '''ap@@''', '''te''']
_A = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
_A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_A = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def __A ( self: Any ) -> List[str]:
_A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
_A = '''I am a small frog.'''
_A = tok([src_text] , padding=__A , truncation=__A )['''input_ids''']
_A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __A ( self: Any ) -> int:
_A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
_A = '''I am a small frog .'''
_A = '''.'''
_A = tok(__A )['''input_ids''']
_A = tok(__A )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 62 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__A = None
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__A = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
__A = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
__A = '▁'
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ["input_ids", "attention_mask"]
A_ = BarthezTokenizer
def __init__( self: List[str] , __A: Dict=None , __A: Optional[int]=None , __A: int="<s>" , __A: Dict="</s>" , __A: Union[str, Any]="</s>" , __A: int="<s>" , __A: int="<unk>" , __A: int="<pad>" , __A: Union[str, Any]="<mask>" , **__A: int , ) -> Optional[int]:
_A = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
_A = vocab_file
_A = False if not self.vocab_file else True
def __A ( self: Any , __A: List[int] , __A: Optional[List[int]] = None ) -> Dict:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
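# Illustration of the layout built above (RoBERTa/BART-style special tokens):
#   single sequence:    <s> A </s>
#   pair of sequences:  <s> A </s></s> B </s>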
def __A ( self: List[str] , __A: List[int] , __A: Optional[List[int]] = None ) -> str:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self: Optional[Any] , __A: str , __A: Optional[str] = None ) -> Tuple:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "roberta"
def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
@property
def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_A = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
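# Added illustration (not from the record): for the default task the `inputs`
# property above maps each model input to its dynamic ONNX axes, equivalent to:
EXPECTED_ONNX_INPUTS = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
}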
| 62 | 0 |
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ("j" is folded into "i")."""
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)
    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # one-based (row, column) coordinates of `letter` in the square
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes
    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        # write each letter's (row, col) as two rows, then read the flattened
        # sequence back in pairs
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message
    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        # invert the encode transposition: interleave the coordinates, then
        # split the flat sequence back into a row half and a column half
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
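# Added round-trip sketch (message is an illustrative choice; the identity
# holds for lowercase, space-free input without "j"):
if __name__ == "__main__":
    bifid = BifidCipher()
    assert bifid.decode(bifid.encode("testmessage")) == "testmessage"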
| 706 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__A = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]:
super().__init__(*__A , **__A )
_A = eval_examples
_A = post_process_function
_A = quant_trainer_args
_A = 1_28 # default number of calibration samples
def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]:
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
_A = calib_dataset if calib_dataset is not None else self.calib_dataset
_A = self._remove_unused_columns(__A , description='''Calibration''' )
return DataLoader(
__A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , )
def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]:
_A = self.train_dataset if calib_dataset is None else calib_dataset
_A = self.get_calib_dataloader(__A )
_A = self.model
quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A )
model.eval()
quant_trainer.enable_calibration(__A )
logger.info('''***** Running calibration *****''' )
logger.info(f""" Num examples = {self.calib_num}""" )
logger.info(f""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(__A ):
# Prediction step
_A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__A , self.quant_trainer_args )
_A = model
def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int:
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(__A )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_A = self.post_process_function(__A , __A , output.predictions )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
self.log(__A )
else:
_A = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A )
return metrics
def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]:
_A = self.get_test_dataloader(__A )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(__A , __A , output.predictions , '''predict''' )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A )
def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]:
_A = self.eval_dataset
_A = self.get_eval_dataloader(__A )
_A = next(iter(__A ) )
# saving device - to make it consistent
_A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
_A = tuple(v.to(__A ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
_A = True
_A = self.model.to(__A )
model.eval()
model.float()
_A = model.module if hasattr(__A , '''module''' ) else model
quant_trainer.configure_model(__A , self.quant_trainer_args )
_A = os.path.join(__A , '''model.onnx''' )
logger.info(f"""exporting model to {output_model_file}""" )
_A = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__A , )
logger.info('''onnx export finished''' )
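# Added note (hedged; the batch size is an illustrative value): the calibration
# loop above breaks once (step + 1) * batch_size >= calib_num, so with the
# default calib_num = 128 and an eval batch size of 16 it consumes
# ceil(128 / 16) = 8 batches.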
| 62 | 0 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def __A ( _lowercase ):
'''simple docstring'''
_A = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
_A = re.match(R'''^mobilenet_v1_([^_]*)_([^_]*)$''' , __snake_case )
if matches:
_A = float(matches[1] )
_A = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
_A = 10_01
_A = '''imagenet-1k-id2label.json'''
_A = '''huggingface/label-files'''
_A = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
    _A = {int(k) + 1: v for k, v in idalabel.items()}
_A = '''background'''
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
return config
def __A ( ):
'''simple docstring'''
_A = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_A = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def __A ( _lowercase , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
_A = get_mobilenet_va_config(__snake_case )
# Load 🤗 model
_A = MobileNetVaForImageClassification(__snake_case ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(__snake_case , __snake_case , __snake_case )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
_A = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
_A = image_processor(images=prepare_img() , return_tensors='''pt''' )
_A = model(**__snake_case )
_A = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
_A = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
_A = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
_A = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
print('''Pushing to the hub...''' )
_A = '''google/''' + model_name
image_processor.push_to_hub(__snake_case )
model.push_to_hub(__snake_case )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__A = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
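# Assumed command line (added; the script name and paths are placeholders):
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224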
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
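# Added sketch of the lazy-import pattern above (module path is an assumption):
# importing the package stays cheap, and the heavy torch-backed module is only
# imported when an attribute is first resolved through the _LazyModule.
#
# import transformers.models.mega as mega  # fast: nothing heavy imported yet
# model_cls = mega.MegaModel               # first access triggers the real import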
| 62 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
"""simple docstring"""
A_ = "openai-gpt"
A_ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self: Optional[int] , __A: str=4_04_78 , __A: Optional[Any]=5_12 , __A: str=7_68 , __A: List[str]=12 , __A: Tuple=12 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: str=0.1 , __A: Dict=0.1 , __A: int=1e-5 , __A: List[str]=0.02 , __A: Dict="cls_index" , __A: List[Any]=True , __A: int=None , __A: List[str]=True , __A: List[Any]=0.1 , **__A: List[Any] , ) -> str:
_A = vocab_size
_A = n_positions
_A = n_embd
_A = n_layer
_A = n_head
_A = afn
_A = resid_pdrop
_A = embd_pdrop
_A = attn_pdrop
_A = layer_norm_epsilon
_A = initializer_range
_A = summary_type
_A = summary_use_proj
_A = summary_activation
_A = summary_first_dropout
_A = summary_proj_to_labels
super().__init__(**lowercase_ )
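# Added usage sketch (hedged): `attribute_map` above aliases the canonical
# config names onto the GPT-style ones, so both spellings stay in sync.
from transformers import OpenAIGPTConfig
_cfg = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
assert _cfg.hidden_size == _cfg.n_embd == 768
assert _cfg.num_hidden_layers == _cfg.n_layer == 12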
| 708 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-length tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Upper-case the input and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list:
    # I and J share a cell, so the alphabet has 25 letters
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
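# Added round-trip sketch (key and message are illustrative): decoding returns
# the *prepared* plaintext, upper-cased and X-padded, not the raw input.
if __name__ == "__main__":
    key = "playfair example"
    ciphertext = encode("hello world", key)
    assert decode(ciphertext, key) == prepare_input("hello world")  # "HELXLOWORLDX"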
| 62 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Dict , __A: Optional[Any] , __A: str=sys.maxsize ) -> str:
_A = "bilinear"
_A = max_size
_A = short_edge_length
def __call__( self: Union[str, Any] , __A: List[str] ) -> Union[str, Any]:
_A = []
for img in imgs:
_A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
_A = size, scale * w
else:
_A = scale * h, size
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > self.max_size:
_A = self.max_size * 1.0 / max(lowerCAmelCase__ , lowerCAmelCase__ )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uinta:
_A = Image.fromarray(lowerCAmelCase__ )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(lowerCAmelCase__ )
else:
                _A = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
_A = nn.functional.interpolate(
lowerCAmelCase__ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase__ ).squeeze(0 )
img_augs.append(lowerCAmelCase__ )
return img_augs
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Optional[int] , __A: Union[str, Any] ) -> Optional[Any]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda __A : (x - self.pixel_mean) / self.pixel_std
def __A ( self: int , __A: Tuple ) -> str:
_A = tuple(max(lowerCAmelCase__ ) for s in zip(*[img.shape for img in images] ) )
_A = [im.shape[-2:] for im in images]
_A = [
nn.functional.pad(
lowerCAmelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return torch.stack(lowerCAmelCase__ ), torch.tensor(lowerCAmelCase__ )
def __call__( self: Union[str, Any] , __A: Union[str, Any] , __A: Any=False ) -> List[Any]:
with torch.no_grad():
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_A = [images]
if single_image:
assert len(lowerCAmelCase__ ) == 1
for i in range(len(lowerCAmelCase__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase__ , images.pop(lowerCAmelCase__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase__ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(lowerCAmelCase__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(lowerCAmelCase__ ) for x in images]
# now pad them to do the following operations
_A = self.pad(lowerCAmelCase__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(lowerCAmelCase__ , lowerCAmelCase__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __A ( _lowercase : Optional[Any] , _lowercase : Any ):
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __A ( _lowercase : Optional[Any] , _lowercase : Tuple[int, int] ):
'''simple docstring'''
assert torch.isfinite(_lowercase ).all(), "Box tensor contains infinite or NaN!"
_A = box_size
tensor[:, 0].clamp_(min=0 , max=_lowercase )
tensor[:, 1].clamp_(min=0 , max=_lowercase )
tensor[:, 2].clamp_(min=0 , max=_lowercase )
tensor[:, 3].clamp_(min=0 , max=_lowercase )
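# Added mini-example (values are made up) for the rescale helper above: with
# boxes as (x1, y1, x2, y2), x coordinates take scale_yx[:, 1] and y
# coordinates take scale_yx[:, 0].
_boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
_scale_yx = torch.tensor([[0.5, 0.25]])  # (y scale, x scale)
_boxes[:, 0::2] *= _scale_yx[:, 1]
_boxes[:, 1::2] *= _scale_yx[:, 0]
assert _boxes.tolist() == [[2.5, 10.0, 27.5, 110.0]]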
| 709 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_token_type_ids
_A = use_input_mask
_A = use_labels
_A = use_mc_token_ids
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
_A = self.vocab_size - 1
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
if self.use_mc_token_ids:
_A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
_A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self: Optional[int] ) -> List[Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]:
_A = CTRLModel(config=__A )
model.to(__A )
model.eval()
model(__A , token_type_ids=__A , head_mask=__A )
model(__A , token_type_ids=__A )
_A = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str:
_A = CTRLLMHeadModel(__A )
model.to(__A )
model.eval()
_A = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self: Optional[int] ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any:
_A = self.num_labels
_A = CTRLForSequenceClassification(__A )
model.to(__A )
model.eval()
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
A_ = (CTRLLMHeadModel,) if is_torch_available() else ()
A_ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __A ( self: Any ) -> Union[str, Any]:
_A = CTRLModelTester(self )
_A = ConfigTester(self , config_class=__A , n_embd=37 )
def __A ( self: Optional[int] ) -> List[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __A ( self: Dict ) -> Any:
self.config_tester.run_common_tests()
def __A ( self: str ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__A )
def __A ( self: List[str] ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: Optional[Any] ) -> int:
pass
@slow
def __A ( self: Tuple ) -> Dict:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = CTRLModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def __A ( self: Any ) -> Union[str, Any]:
pass
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: int ) -> Union[str, Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __A ( self: Any ) -> Any:
_A = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(__A )
_A = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is
_A = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
_A = model.generate(__A , do_sample=__A )
self.assertListEqual(output_ids[0].tolist() , __A )
| 62 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
_A = load_file(UpperCAmelCase__ )
_A = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
_A = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
_A = pipeline.text_encoder
else:
_A = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
_A = pipeline.unet
# find the target layer
_A = layer_infos.pop(0 )
while len(UpperCAmelCase__ ) > -1:
try:
_A = curr_layer.__getattr__(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
_A = layer_infos.pop(0 )
elif len(UpperCAmelCase__ ) == 0:
break
except Exception:
if len(UpperCAmelCase__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
_A = layer_infos.pop(0 )
_A = []
if "lora_down" in key:
pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
pair_keys.append(UpperCAmelCase__ )
else:
pair_keys.append(UpperCAmelCase__ )
pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
_A = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
_A = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ ).unsqueeze(2 ).unsqueeze(3 )
else:
_A = state_dict[pair_keys[0]].to(torch.floataa )
_A = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ )
# update visited list
for item in pair_keys:
visited.append(UpperCAmelCase__ )
return pipeline
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
__A = parser.parse_args()
__A = args.base_model_path
__A = args.checkpoint_path
__A = args.dump_path
__A = args.lora_prefix_unet
__A = args.lora_prefix_text_encoder
__A = args.alpha
__A = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__A = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
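# Added mini-example (shapes and alpha are illustrative) of the merge rule the
# loop above applies to 2D layers: W <- W + alpha * (up @ down).
_w0 = torch.randn(16, 16)
_up, _down = torch.randn(16, 4), torch.randn(4, 16)
_w_merged = _w0 + 0.75 * torch.mm(_up, _down)
assert _w_merged.shape == _w0.shape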
| 710 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """Order the vertices of `graph` by decreasing DFS finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: DFS order on `graph`, then DFS on its reverse."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
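# Added usage sketch on the sample graphs above: test_graph_1 contains the
# cycle 0 -> 2 -> 1 -> 0, so its components are {0, 1, 2}, {3} and {4};
# test_graph_2 splits into {0, 1, 2} and {3, 4, 5}.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))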
| 62 | 0 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
A_ = IFImgaImgSuperResolutionPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
A_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self: Tuple ) -> int:
return self._get_superresolution_dummy_components()
def __A ( self: List[str] , __A: Tuple , __A: Dict=0 ) -> Optional[Any]:
if str(UpperCAmelCase_ ).startswith('''mps''' ):
_A = torch.manual_seed(UpperCAmelCase_ )
else:
_A = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_A = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_A = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self: Tuple ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __A ( self: Any ) -> str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self: List[str] ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __A ( self: Any ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __A ( self: Optional[int] ) -> List[Any]:
self._test_save_load_local()
def __A ( self: Tuple ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 711 |
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """
    Memory-function (top-down) 0/1 knapsack: only the needed subproblems are
    solved. `f` is a global 2D table pre-filled with -1s.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # item i belongs to an optimal subset iff dp[i][j] differs from dp[i-1][j],
    # i.e. dropping the item would lose value; walk the table backwards
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 62 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __A ( _lowercase , _lowercase=False ):
'''simple docstring'''
try:
_A = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_A = default
else:
# KEY is set, convert it to True or False.
try:
_A = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
__A = parse_flag_from_env('RUN_SLOW', default=False)
__A = parse_flag_from_env('RUN_REMOTE', default=False)
__A = parse_flag_from_env('RUN_LOCAL', default=True)
__A = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
__A = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
__A = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
__A = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
__A = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
__A = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
__A = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
__A = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __A ( _lowercase ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
_A = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
_A = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
_A = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
_A = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
_A = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
if not config.TF_AVAILABLE:
_A = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
_A = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
_A = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __A ( _lowercase ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __A ( _lowercase ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __A ( _lowercase ):
'''simple docstring'''
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __A ( _lowercase ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __A ( _lowercase ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __A ( _lowercase ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
_A = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
_A = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
_A = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __A ( _lowercase ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
_A = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __A ( *_lowercase ):
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
_A = decorator(_lowercase )
setattr(cls , _lowercase , _lowercase )
return cls
return decorate
class SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
"""simple docstring"""
pass
class SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __A ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS , _lowercase=1e-16 ):
'''simple docstring'''
_A = requests.Session().request
def timeout_request(_lowercase , _lowercase , _lowercase , **_lowercase ):
# Change the url to an invalid url so that the connection hangs
_A = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_A = timeout
try:
return online_request(_lowercase , _lowercase , **_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_A = url
_A = e.args[0]
_A = (max_retry_error.args[0].replace('''10.255.255.1''' , f"""OfflineMock[{url}]""" ),)
_A = (max_retry_error,)
raise
def raise_connection_error(_lowercase , _lowercase , **_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , _lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , _lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __A ( *_lowercase , **_lowercase ):
'''simple docstring'''
_A = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase , **_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __A ( ):
'''simple docstring'''
import gc
gc.collect()
_A = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __A ( ):
'''simple docstring'''
import gc
gc.collect()
_A = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
return deepcopy(_lowercase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowercase ).integers(0 , 1_00 , 10 ).tolist()
def __A ( _lowercase ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase , *_lowercase , **_lowercase ):
try:
return func(*_lowercase , **_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper , _lowercase )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Optional[Any] , __A: str , __A: Tuple , __A: str ) -> Dict:
_A = returncode
_A = stdout
_A = stderr
async def __A ( _lowercase , _lowercase ):
'''simple docstring'''
while True:
_A = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=False ):
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowercase ) )
_A = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowercase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_A = []
_A = []
def tee(_lowercase , _lowercase , _lowercase , _lowercase="" ):
_A = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase , _lowercase , file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowercase : tee(_lowercase , _lowercase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda _lowercase : tee(_lowercase , _lowercase , sys.stderr , label='''stderr:''' ) ),
] , timeout=_lowercase , )
return _RunOutput(await p.wait() , _lowercase , _lowercase )
def __A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=1_80 , _lowercase=False , _lowercase=True ):
'''simple docstring'''
_A = asyncio.get_event_loop()
_A = loop.run_until_complete(
_stream_subprocess(_lowercase , env=_lowercase , stdin=_lowercase , timeout=_lowercase , quiet=_lowercase , echo=_lowercase ) )
_A = ''' '''.join(_lowercase )
if result.returncode > 0:
_A = '''\n'''.join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def __A ( ):
'''simple docstring'''
_A = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
_A = re.sub(R'''^gw''' , '''''' , _lowercase , 0 , re.M )
return int(_lowercase )
def __A ( ):
'''simple docstring'''
_A = 2_95_00
_A = pytest_xdist_worker_id()
return port + uniq_delta
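# Added usage sketch (hedged; upstream `datasets` names this context manager
# `offline`, a name the record's renaming obscures):
#
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     ...  # any requests.Session.send in here raises requests.ConnectionError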
| 712 |
def solution(num: int = 1_000_000) -> int:
    """Return the starting number below `num` with the longest Collatz chain
    (Project Euler problem 14), memoizing chain lengths as they are found."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, num):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
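# Quick sanity check (added): below 10 the longest chain starts at 9 (20 terms),
# and the full run with the default limit reproduces Project Euler #14's 837799.
assert solution(10) == 9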
if __name__ == "__main__":
print(solution(int(input().strip())))
| 62 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
__A = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
__A = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
__A = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def __A ( self: Dict ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def __A ( self: Dict ) -> int:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def __A ( self: List[str] , __A: Tuple , __A: Union[str, Any] , __A: Tuple=None , __A: Tuple="uniform_average" , __A: Any=True ) -> List[str]:
_A = mean_squared_error(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sample_weight=_SCREAMING_SNAKE_CASE , multioutput=_SCREAMING_SNAKE_CASE , squared=_SCREAMING_SNAKE_CASE )
return {"mse": mse}
| 713 |
def text_justification(word: str, max_width: int) -> list:
    """Greedy full justification: pack words per line, then distribute the
    leftover spaces round-robin from the left (the last line is left-padded)."""
    words = word.split()
    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)
    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without spaces)
            # len(inner_word) = length of current word
            # len(line) = number of spaces to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
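# Added worked example (the classic width-16 case, verified by hand):
assert text_justification("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]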
| 62 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__A = logging.getLogger()
__A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
def __A ( self: str , __A: List[str] ) -> Dict:
os.makedirs(__A , exist_ok=__A )
_A = {"""source""": """What is love ?""", """target""": """life"""}
_A = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_A = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(__A , f"""{split}.{field}""" ) , '''w''' ) as f:
f.write(__A )
def __A ( self: Optional[Any] , __A: int , __A: str = "pytorch" ) -> Any:
_A = self.get_auto_remove_tmp_dir()
_A = os.path.join(__A , '''output''' )
_A = os.path.join(__A , '''data''' )
self._create_dummy_data(data_dir=__A )
_A = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
_A = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__A , env=self.get_env() )
_A = os.path.join(__A , '''metrics.json''' )
with open(__A ) as f:
_A = json.load(__A )
return result
@require_torch_gpu
def __A ( self: Dict ) -> Optional[int]:
_A = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
_A = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def __A ( self: List[str] ) -> Union[str, Any]:
_A = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def __A ( self: List[str] ) -> Any:
_A = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 714 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__A = '\\n Text data.\n Second line of data.'
__A = 'file'
@pytest.fixture(scope='''session''' )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_A = bytes(_lowercase , '''utf-8''' )
with zstd.open(_lowercase , '''wb''' ) as f:
f.write(_lowercase )
return path
@pytest.fixture
def __A ( _lowercase ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f:
f.write(_lowercase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_A = input_paths[compression_format]
_A = tmp_path / '''cache'''
_A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase )
_A = cached_path(_lowercase , download_config=_lowercase )
with open(_lowercase ) as f:
_A = f.read()
with open(_lowercase ) as f:
_A = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = '''custom_cache'''
_A = '''custom_extracted_dir'''
_A = tmp_path / '''custom_extracted_path'''
if default_extracted:
_A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) )
_A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_A = xz_file
_A = (
DownloadConfig(extract_compressed_file=_lowercase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase )
)
_A = cached_path(_lowercase , download_config=_lowercase )
assert Path(_lowercase ).parent.parts[-2:] == expected
def __A ( _lowercase ):
'''simple docstring'''
_A = str(Path(_lowercase ).resolve() )
assert cached_path(_lowercase ) == text_file
# relative path
_A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowercase ) == text_file
def __A ( _lowercase ):
'''simple docstring'''
_A = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_lowercase ):
cached_path(_lowercase )
# relative path
_A = '''./__missing_file__.txt'''
with pytest.raises(_lowercase ):
cached_path(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(_lowercase ) as f:
_A = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( ):
'''simple docstring'''
with pytest.raises(_lowercase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
http_get('''https://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
fsspec_head('''s3://huggingface.co''' )
| 62 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Optional[Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_A = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowercase , cache_dir=__lowercase )
_A = [t[-1] for t in os.walk(os.path.join(__lowercase , os.listdir(__lowercase )[0] , '''snapshots''' ) )]
_A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Any ) -> int:
_A ,_A = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowercase )
_A = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A = jax.random.PRNGKey(0 )
_A = 4
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A = replicate(__lowercase )
_A = jax.random.split(__lowercase , __lowercase )
_A = shard(__lowercase )
_A = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(__lowercase , dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
_A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowercase ) == num_samples
def __A ( self: Union[str, Any] ) -> str:
_A ,_A = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__lowercase )
_A = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A = jax.random.PRNGKey(0 )
_A = 50
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A = replicate(__lowercase )
_A = jax.random.split(__lowercase , __lowercase )
_A = shard(__lowercase )
_A = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def __A ( self: int ) -> List[Any]:
_A ,_A = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase )
_A = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A = jax.random.PRNGKey(0 )
_A = 50
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A = replicate(__lowercase )
_A = jax.random.split(__lowercase , __lowercase )
_A = shard(__lowercase )
_A = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def __A ( self: Dict ) -> Tuple:
_A ,_A = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
_A = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A = jax.random.PRNGKey(0 )
_A = 50
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A = replicate(__lowercase )
_A = jax.random.split(__lowercase , __lowercase )
_A = shard(__lowercase )
_A = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def __A ( self: Union[str, Any] ) -> List[Any]:
_A = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
_A ,_A = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__lowercase , safety_checker=__lowercase , )
_A = scheduler.create_state()
_A = scheduler_state
_A = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A = jax.random.PRNGKey(0 )
_A = 50
_A = jax.device_count()
_A = num_samples * [prompt]
_A = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
_A = replicate(__lowercase )
_A = jax.random.split(__lowercase , __lowercase )
_A = shard(__lowercase )
_A = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def __A ( self: Dict ) -> Any:
_A = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
_A = jax.device_count()
_A = num_samples * [prompt]
_A = jax.random.split(jax.random.PRNGKey(0 ) , __lowercase )
_A ,_A = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase , )
_A = replicate(__lowercase )
_A = pipeline.prepare_inputs(__lowercase )
_A = shard(__lowercase )
_A = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
_A = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
_A ,_A = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase , use_memory_efficient_attention=__lowercase , )
_A = replicate(__lowercase )
_A = pipeline.prepare_inputs(__lowercase )
_A = shard(__lowercase )
_A = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
_A = images_eff[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 715 |
import math
def __A ( _lowercase ):
'''simple docstring'''
_A = []
_A = 2
_A = int(math.sqrt(_lowercase ) ) # Size of every segment
_A = [True] * (end + 1)
_A = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowercase )
for i in range(start * start , end + 1 , _lowercase ):
_A = False
start += 1
prime += in_prime
_A = end + 1
_A = min(2 * end , _lowercase )
while low <= n:
_A = [True] * (high - low + 1)
for each in in_prime:
_A = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowercase , high + 1 , _lowercase ):
_A = False
for j in range(len(_lowercase ) ):
if temp[j] is True:
prime.append(j + low )
_A = high + 1
_A = min(high + end , _lowercase )
return prime
print(sieve(10**6))
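# Hedged sanity check (the segmented sieve above is obfuscated to `__A`;
# assuming it is exposed as `sieve(n)`):
#
#     sieve(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
#
# Only the primes up to sqrt(n) are kept resident while each window
# [low, high] of size about sqrt(n) is sieved in turn.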
| 62 | 0 |
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_lowerCAmelCase ) )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if index == len(_lowerCAmelCase ):
return True
# Recursive Step
for i in range(_lowerCAmelCase ):
if valid_coloring(graph[index] , _lowerCAmelCase , _lowerCAmelCase ):
# Color current vertex
_A = i
# Validate coloring
if util_color(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index + 1 ):
return True
# Backtrack
_A = -1
return False
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = [-1] * len(_lowerCAmelCase )
if util_color(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 0 ):
return colored_vertices
return []
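# Hedged usage sketch (the entry point above is obfuscated to `__A`; assuming
# it is exposed as `color(graph, max_colors)` over an adjacency matrix):
#
#     triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#     color(triangle, 3)  # -> [0, 1, 2]: a proper 3-colouring exists
#     color(triangle, 2)  # -> []: a triangle is not 2-colourable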
| 716 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: Tuple ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __A: Dict ) -> Tuple:
_A ,_A ,_A ,_A = hidden_states.shape
_A = jax.image.resize(
__A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: List[str] ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = None
A_ = 0.0
A_ = None
A_ = jnp.floataa
def __A ( self: Dict ) -> Dict:
_A = self.in_channels if self.out_channels is None else self.out_channels
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = nn.Dense(__A , dtype=self.dtype )
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Dropout(self.dropout_prob )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_A = None
if use_nin_shortcut:
_A = nn.Conv(
__A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]:
_A = hidden_states
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.conva(__A )
_A = self.time_emb_proj(nn.swish(__A ) )
_A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 )
_A = hidden_states + temb
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.dropout(__A , __A )
_A = self.conva(__A )
if self.conv_shortcut is not None:
_A = self.conv_shortcut(__A )
return hidden_states + residual
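# Worked shape example for the blocks above (a sketch; class names are
# obfuscated): given an NHWC input of shape (1, 8, 8, C), the upsample block
# resizes it to (1, 16, 16, C) with nearest-neighbour interpolation and then
# applies the 3x3 stride-1 convolution, giving (1, 16, 16, out_channels);
# the downsample block instead applies a 3x3 stride-2 convolution, mapping
# (1, 8, 8, C) to (1, 4, 4, out_channels).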
| 62 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def __A ( _lowercase , _lowercase , _lowercase , _lowercase = 1_00 , ):
'''simple docstring'''
_A = x_start
_A = fnc(_lowercase )
_A = 0.0
for _ in range(_lowercase ):
# Approximates curve as a sequence of linear lines and sums their length
_A = (x_end - x_start) / steps + xa
_A = fnc(_lowercase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
_A = xa
_A = fxa
return length
if __name__ == "__main__":
def __A ( _lowercase ):
'''simple docstring'''
return math.sin(10 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
__A = 10
while i <= 100000:
print(f'With {i} steps: {line_length(f, -10, 10, i)}')
i *= 10
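# Hedged sanity check (assuming the routine above is exposed as
# `line_length(fnc, x_start, x_end, steps)`): a straight line is its own
# chord, so for f(x) = x every segment has length hypot(dx, dx) and the
# approximation is exact at any step count:
#
#     line_length(lambda x: x, 0, 1, 10)  # -> sqrt(2) ~= 1.41421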
| 717 |
def __A ( _lowercase ):
'''simple docstring'''
_A = [0] * len(_lowercase )
_A = []
_A = []
_A = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_lowercase ) ):
if indegree[i] == 0:
queue.append(_lowercase )
while queue:
_A = queue.pop(0 )
cnt += 1
topo.append(_lowercase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_lowercase )
if cnt != len(_lowercase ):
print('''Cycle exists''' )
else:
print(_lowercase )
# Adjacency List of Graph
__A = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
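# Because the queue is FIFO, this Kahn's-algorithm implementation is
# deterministic for the adjacency list above and prints [0, 1, 2, 3, 4, 5].
# A cyclic input such as {0: [1], 1: [0]} leaves every indegree positive, so
# the processed-vertex count check fails and 'Cycle exists' is printed instead.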
| 62 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __A ( _lowercase , _lowercase , _lowercase = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
_A = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' , revision=__lowerCAmelCase )
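# Hedged usage sketch: the wrapper above forwards to huggingface_hub.hf_hub_url
# with repo_type="dataset" (the exact URL layout is huggingface_hub's, roughly):
#
#     hf_hub_url("squad", "dataset_infos.json")
#     # -> "https://huggingface.co/datasets/squad/resolve/main/dataset_infos.json"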
| 718 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class SCREAMING_SNAKE_CASE ( snake_case , snake_case ):
"""simple docstring"""
A_ = 1
@register_to_config
def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__A )
# standard deviation of the initial noise distribution
_A = 1.0
# For now we only support F-PNDM, i.e. the Runge-Kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_A = 4
# running values
_A = []
def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int:
_A = num_inference_steps
_A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_A = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_A = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
_A = torch.sin(steps * math.pi / 2 ) ** 2
_A = (1.0 - self.betas**2) ** 0.5
_A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
_A = timesteps.to(__A )
_A = []
def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
_A = (self.timesteps == timestep).nonzero().item()
_A = timestep_index + 1
_A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__A )
if len(self.ets ) == 1:
_A = self.ets[-1]
elif len(self.ets ) == 2:
_A = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_A = self._get_prev_sample(__A , __A , __A , __A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__A )
def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor:
return sample
def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]:
_A = self.alphas[timestep_index]
_A = self.betas[timestep_index]
_A = self.alphas[prev_timestep_index]
_A = self.betas[prev_timestep_index]
_A = (sample - sigma * ets) / max(__A , 1e-8 )
_A = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[str] ) -> Dict:
return self.config.num_train_timesteps
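# Note on the multistep branch in the step method above: the 1-, 2-, 3- and
# 4-term combinations of cached model outputs, i.e. (1), (3, -1)/2,
# (23, -16, 5)/12 and (55, -59, 37, -9)/24, are the classic Adams-Bashforth
# coefficients, so once four outputs have accumulated the scheduler performs
# a fourth-order linear multistep update (see the PNDM paper linked above).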
| 62 | 0 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class SCREAMING_SNAKE_CASE ( __a ):
"""simple docstring"""
def __init__( self: Dict ) -> List[Any]:
_A = []
def __A ( self: Any , __A: Dict , __A: Dict , __A: List[Any] , **__A: Dict ) -> int:
self.events.append('''on_init_end''' )
def __A ( self: Optional[int] , __A: Dict , __A: Optional[Any] , __A: Tuple , **__A: Any ) -> List[str]:
self.events.append('''on_train_begin''' )
def __A ( self: Dict , __A: Optional[Any] , __A: Tuple , __A: str , **__A: List[str] ) -> Dict:
self.events.append('''on_train_end''' )
def __A ( self: List[str] , __A: str , __A: int , __A: Optional[int] , **__A: Optional[Any] ) -> Tuple:
self.events.append('''on_epoch_begin''' )
def __A ( self: Optional[Any] , __A: Optional[int] , __A: List[str] , __A: Optional[Any] , **__A: Optional[Any] ) -> List[Any]:
self.events.append('''on_epoch_end''' )
def __A ( self: List[Any] , __A: Optional[Any] , __A: Any , __A: Tuple , **__A: Optional[Any] ) -> Optional[int]:
self.events.append('''on_step_begin''' )
def __A ( self: List[Any] , __A: Dict , __A: str , __A: Optional[Any] , **__A: Tuple ) -> Any:
self.events.append('''on_step_end''' )
def __A ( self: Dict , __A: Tuple , __A: Tuple , __A: List[Any] , **__A: Optional[Any] ) -> List[Any]:
self.events.append('''on_evaluate''' )
def __A ( self: Tuple , __A: Optional[Any] , __A: Any , __A: Optional[int] , **__A: int ) -> Tuple:
self.events.append('''on_predict''' )
def __A ( self: List[Any] , __A: Any , __A: List[str] , __A: Union[str, Any] , **__A: int ) -> str:
self.events.append('''on_save''' )
def __A ( self: Union[str, Any] , __A: str , __A: Dict , __A: Any , **__A: Any ) -> Optional[Any]:
self.events.append('''on_log''' )
def __A ( self: List[str] , __A: int , __A: Tuple , __A: str , **__A: Union[str, Any] ) -> List[Any]:
self.events.append('''on_prediction_step''' )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Tuple ) -> Union[str, Any]:
_A = tempfile.mkdtemp()
def __A ( self: Optional[Any] ) -> Any:
shutil.rmtree(self.output_dir )
def __A ( self: str , __A: Any=0 , __A: List[Any]=0 , __A: Optional[Any]=64 , __A: List[Any]=64 , __A: Optional[Any]=None , __A: int=False , **__A: List[Any] ) -> Union[str, Any]:
_A = RegressionDataset(length=lowerCAmelCase_ )
_A = RegressionDataset(length=lowerCAmelCase_ )
_A = RegressionModelConfig(a=lowerCAmelCase_ , b=lowerCAmelCase_ )
_A = RegressionPreTrainedModel(lowerCAmelCase_ )
_A = TrainingArguments(self.output_dir , disable_tqdm=lowerCAmelCase_ , report_to=[] , **lowerCAmelCase_ )
return Trainer(
lowerCAmelCase_ , lowerCAmelCase_ , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , callbacks=lowerCAmelCase_ , )
def __A ( self: List[str] , __A: Dict , __A: int ) -> Optional[Any]:
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
# Order doesn't matter
_A = sorted(lowerCAmelCase_ , key=lambda __A : cb.__name__ if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cb.__class__.__name__ )
_A = sorted(lowerCAmelCase_ , key=lambda __A : cb.__name__ if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cb.__class__.__name__ )
for cba, cba in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(lowerCAmelCase_ , cba.__class__ )
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(cba.__class__ , lowerCAmelCase_ )
else:
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __A ( self: Union[str, Any] , __A: Union[str, Any] ) -> Optional[Any]:
_A = ['''on_init_end''', '''on_train_begin''']
_A = 0
_A = len(trainer.get_eval_dataloader() )
_A = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowerCAmelCase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self: Tuple ) -> List[str]:
_A = self.get_trainer()
_A = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
# Callbacks passed at init are added to the default callbacks
_A = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowerCAmelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
_A = self.get_trainer(disable_tqdm=lowerCAmelCase_ )
_A = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
def __A ( self: int ) -> List[Any]:
_A = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_A = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowerCAmelCase_ )
expected_callbacks.remove(lowerCAmelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
_A = self.get_trainer()
_A = trainer.pop_callback(lowerCAmelCase_ )
self.assertEqual(cb.__class__ , lowerCAmelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
trainer.add_callback(lowerCAmelCase_ )
expected_callbacks.insert(0 , lowerCAmelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
# We can also add, pop, or remove by instance
_A = self.get_trainer()
_A = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowerCAmelCase_ )
expected_callbacks.remove(lowerCAmelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
_A = self.get_trainer()
_A = trainer.callback_handler.callbacks[0]
_A = trainer.pop_callback(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
trainer.add_callback(lowerCAmelCase_ )
expected_callbacks.insert(0 , lowerCAmelCase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_ )
def __A ( self: List[Any] ) -> Optional[int]:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=lowerCAmelCase_ )
_A = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_A = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_ ) )
# Independent log/save/eval
_A = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_A = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_ ) )
_A = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_A = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_ ) )
_A = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
_A = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_ ) )
_A = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
_A = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_ ) )
# A bit of everything
_A = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
_A = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
_A = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowerCAmelCase_ ) in warn_mock.call_args[0][0]
| 719 |
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A ,_A = len(_lowercase ), len(grid[0] )
if (
min(_lowercase , _lowercase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
_A = 0
count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase )
count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase )
count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase )
count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
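# Hedged sanity check (assuming the search above is exposed as
# `depth_first_search(grid, row, col, visit)`): with the centre cell blocked,
# the free cells of a 3x3 grid form a ring, so exactly two simple paths lead
# from (0, 0) to (2, 2):
#
#     grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#     depth_first_search(grid, 0, 0, set())  # -> 2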
| 62 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self: Any ) -> List[str]:
_A = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
_A = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
_A = """The dog is cute and lives in the garden house"""
_A = jnp.array([tokenizer.encode(__a )] )
_A = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
_A = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
_A = model(__a )["""last_hidden_state"""]
self.assertEqual(output.shape , __a )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , __a , atol=1e-3 ) )
| 720 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__A = NewType('DataClass', Any)
__A = NewType('DataClassType', Any)
def __A ( _lowercase ):
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __A ( _lowercase ):
'''simple docstring'''
_A = {str(_lowercase ): choice for choice in choices}
return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase )
def __A ( *,
_lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_A = {}
if aliases is not None:
_A = aliases
if help is not None:
_A = help
return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = 42
def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_A = ArgumentDefaultsHelpFormatter
super().__init__(**__A )
if dataclasses.is_dataclass(__A ):
_A = [dataclass_types]
_A = list(__A )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__A )
@staticmethod
def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str:
_A = f"""--{field.name}"""
_A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __A ):
raise RuntimeError(
'''Unresolved type detected; it should have been resolved with the help of '''
'''the `typing.get_type_hints` method by default''' )
_A = kwargs.pop('''aliases''' , [] )
if isinstance(__A , __A ):
_A = [aliases]
_A = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__A ) not in field.type.__args__:
# filter `str` in Union
_A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_A = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_A = (
field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1]
)
_A = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_A = {}
if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )):
if origin_type is Literal:
_A = field.type.__args__
else:
_A = [x.value for x in field.type]
_A = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
_A = field.default
else:
_A = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_A = copy(__A )
# Hack because type=bool in argparse does not behave as we want.
_A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# The default value is False if no default is given for a field of type bool.
_A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_A = default
# This tells argparse we accept 0 or 1 value after --field_name
_A = '''?'''
# This is the value that will get picked if we do --field_name (without value)
_A = True
elif isclass(__A ) and issubclass(__A , __A ):
_A = field.type.__args__[0]
_A = '''+'''
if field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
elif field.default is dataclasses.MISSING:
_A = True
else:
_A = field.type
if field.default is not dataclasses.MISSING:
_A = field.default
elif field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
else:
_A = True
parser.add_argument(__A , *__A , **__A )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_A = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A )
def __A ( self: Dict , __A: DataClassType ) -> List[Any]:
if hasattr(__A , '''_argument_group_name''' ):
_A = self.add_argument_group(dtype._argument_group_name )
else:
_A = self
try:
_A = get_type_hints(__A )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ):
_A = '''.'''.join(map(__A , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__A ):
if not field.init:
continue
_A = type_hints[field.name]
self._parse_dataclass_field(__A , __A )
def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_A = []
if args_filename:
args_files.append(Path(__A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_A = ArgumentParser()
args_file_parser.add_argument(__A , type=__A , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
_A ,_A = args_file_parser.parse_known_args(args=__A )
_A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A )
if cmd_args_file_paths:
args_files.extend([Path(__A ) for p in cmd_args_file_paths] )
_A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_A = file_args + args if args is not None else file_args + sys.argv[1:]
_A ,_A = self.parse_known_args(args=__A )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in vars(__A ).items() if k in keys}
for k in keys:
delattr(__A , __A )
_A = dtype(**__A )
outputs.append(__A )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = set(args.keys() )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_A = dtype(**__A )
outputs.append(__A )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" )
return tuple(__A )
def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file:
_A = json.loads(open_json_file.read() )
_A = self.parse_dict(__A , allow_extra_keys=__A )
return tuple(__A )
def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A )
return tuple(__A )
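# Hedged usage sketch: this is the transformers-style HfArgumentParser, so the
# obfuscated methods correspond to parse_args_into_dataclasses / parse_dict /
# parse_json_file / parse_yaml_file; assuming the class is exposed under its
# real name:
#
#     from dataclasses import dataclass, field
#
#     @dataclass
#     class TrainArgs:
#         learning_rate: float = field(default=3e-4, metadata={"help": "Peak LR."})
#         do_train: bool = False
#
#     parser = HfArgumentParser(TrainArgs)
#     (args,) = parser.parse_args_into_dataclasses(
#         args=["--learning_rate", "1e-4", "--do_train"]
#     )
#     assert args.learning_rate == 1e-4 and args.do_train is True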
| 62 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "time_series_transformer"
A_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self: Optional[int] , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "student_t" , __A: str = "nll" , __A: int = 1 , __A: List[int] = [1, 2, 3, 4, 5, 6, 7] , __A: Optional[Union[str, bool]] = "mean" , __A: int = 0 , __A: int = 0 , __A: int = 0 , __A: int = 0 , __A: Optional[List[int]] = None , __A: Optional[List[int]] = None , __A: int = 32 , __A: int = 32 , __A: int = 2 , __A: int = 2 , __A: int = 2 , __A: int = 2 , __A: bool = True , __A: str = "gelu" , __A: int = 64 , __A: float = 0.1 , __A: float = 0.1 , __A: float = 0.1 , __A: float = 0.1 , __A: float = 0.1 , __A: int = 1_00 , __A: float = 0.02 , __A: Union[str, Any]=True , **__A: Tuple , ) -> Optional[int]:
# time series specific configuration
_A = prediction_length
_A = context_length or prediction_length
_A = distribution_output
_A = loss
_A = input_size
_A = num_time_features
_A = lags_sequence
_A = scaling
_A = num_dynamic_real_features
_A = num_static_real_features
_A = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
_A = cardinality
else:
_A = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
_A = embedding_dimension
else:
_A = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_A = num_parallel_samples
# Transformer architecture configuration
_A = input_size * len(UpperCAmelCase__ ) + self._number_of_features
_A = d_model
_A = encoder_attention_heads
_A = decoder_attention_heads
_A = encoder_ffn_dim
_A = decoder_ffn_dim
_A = encoder_layers
_A = decoder_layers
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = activation_function
_A = init_std
_A = use_cache
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def __A ( self: List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
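# Worked example for the feature count above (a sketch with assumed settings):
# with input_size=1, num_time_features=2, no dynamic or static real features,
# and one static categorical feature of cardinality 3 (so embedding_dimension
# defaults to [min(50, (3 + 1) // 2)] = [2]), the property returns
# 2 + 0 + 2 + 0 + 1 * 2 = 6; with the default lags_sequence of length 7 the
# encoder/decoder input dimension becomes input_size * 7 + 6 = 13 before the
# d_model projection.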
| 721 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str:
_A = parent
_A = batch_size
_A = is_training
_A = use_auxiliary_loss
_A = num_queries
_A = num_channels
_A = min_size
_A = max_size
_A = num_labels
_A = mask_feature_size
def __A ( self: Dict ) -> Optional[int]:
_A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__A )
_A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A )
_A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5
).float()
_A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long()
_A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __A ( self: Optional[Any] ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __A ( self: Dict ) -> Tuple:
_A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs()
_A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int:
_A = output.encoder_hidden_states
_A = output.pixel_decoder_hidden_states
_A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers )
def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any:
with torch.no_grad():
_A = MaskFormerModel(config=__A )
model.to(__A )
model.eval()
_A = model(pixel_values=__A , pixel_mask=__A )
_A = model(__A , output_hidden_states=__A )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__A , __A )
def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int:
_A = MaskFormerForInstanceSegmentation(config=__A )
model.to(__A )
model.eval()
def comm_check_on_output(__A: int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_A = model(pixel_values=__A , pixel_mask=__A )
_A = model(__A )
comm_check_on_output(__A )
_A = model(
pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A )
comm_check_on_output(__A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def __A ( self: int ) -> Tuple:
_A = MaskFormerModelTester(self )
_A = ConfigTester(self , config_class=__A , has_text_modality=__A )
def __A ( self: List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def __A ( self: Optional[Any] ) -> int:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def __A ( self: Dict ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def __A ( self: int ) -> Tuple:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def __A ( self: List[Any] ) -> Any:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def __A ( self: Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def __A ( self: int ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __A ( self: Union[str, Any] ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: List[Any] ) -> Any:
pass
def __A ( self: Dict ) -> Optional[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
@slow
def __A ( self: int ) -> Optional[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_A = MaskFormerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __A ( self: Optional[Any] ) -> Optional[int]:
_A = (self.model_tester.min_size,) * 2
_A = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__A ),
'''mask_labels''': torch.randn((2, 10, *size) , device=__A ),
'''class_labels''': torch.zeros(2 , 10 , device=__A ).long(),
}
_A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A )
_A = model(**__A )
self.assertTrue(outputs.loss is not None )
def __A ( self: Optional[Any] ) -> List[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def __A ( self: Any ) -> Tuple:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A ).to(__A )
_A = model(**__A , output_attentions=__A )
self.assertTrue(outputs.attentions is not None )
def __A ( self: Dict ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = model_class(__A )
model.to(__A )
model.train()
_A = model(__A , mask_labels=__A , class_labels=__A ).loss
loss.backward()
def __A ( self: Tuple ) -> Optional[Any]:
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = True
_A = True
_A = model_class(__A )
model.to(__A )
model.train()
_A = model(__A , mask_labels=__A , class_labels=__A )
_A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def __A ( ):
'''simple docstring'''
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self: Union[str, Any] ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __A ( self: List[Any] ) -> Any:
_A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
_A = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
_A = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
_A = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) )
def __A ( self: Dict ) -> Dict:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_A = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def __A ( self: List[Any] ) -> Dict:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_A = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def __A ( self: Optional[Any] ) -> str:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
_A = inputs['''pixel_values'''].to(__A )
_A = [el.to(__A ) for el in inputs['''mask_labels''']]
_A = [el.to(__A ) for el in inputs['''class_labels''']]
with torch.no_grad():
_A = model(**__A )
self.assertTrue(outputs.loss is not None )
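# Hedged companion example (not part of the test suite above): a minimal
# MaskFormer inference sketch using only public transformers APIs and the
# same checkpoint exercised by the slow tests; the COCO image URL is illustrative.
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
from PIL import Image
import requests

processor = MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
model = MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw)
inputs = processor(images=image, return_tensors='pt')
outputs = model(**inputs)  # exposes masks_queries_logits / class_queries_logits as asserted above
# reduce the query logits to a (height, width) semantic map per input image
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]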
| 62 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path , tgt_path , save_path=None , **rouge_kwargs ):
'''simple docstring'''
pred_lns = [x.strip() for x in open(pred_path ).readlines()]
tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
metrics = calculate_rouge(pred_lns , tgt_lns , **rouge_kwargs )
if save_path is not None:
save_json(metrics , save_path , indent=None )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
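# Hedged usage sketch for the Fire entry point above (the script filename is
# illustrative; utils.calculate_rouge is the helper already imported above):
#
#   python rouge_cli.py predictions.txt targets.txt --save_path rouge.json
#
# or, programmatically:
#   metrics = calculate_rouge(['the cat sat on the mat'], ['a cat sat on the mat'])
#   print(metrics)   # dict of ROUGE-1/2/L F-scores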
| 700 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str:
_A = question_encoder
_A = generator
_A = self.question_encoder
def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict:
if os.path.isfile(__A ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__A , exist_ok=__A )
_A = os.path.join(__A , '''question_encoder_tokenizer''' )
_A = os.path.join(__A , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__A )
self.generator.save_pretrained(__A )
@classmethod
def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_A = kwargs.pop('''config''' , __A )
if config is None:
_A = RagConfig.from_pretrained(__A )
_A = AutoTokenizer.from_pretrained(
__A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
_A = AutoTokenizer.from_pretrained(
__A , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__A , generator=__A )
def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int:
return self.current_tokenizer(*__A , **__A )
def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict:
return self.generator.batch_decode(*__A , **__A )
def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple:
return self.generator.decode(*__A , **__A )
def __A ( self: Dict ) -> List[str]:
_A = self.question_encoder
def __A ( self: Union[str, Any] ) -> int:
_A = self.generator
def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding:
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __A , )
if max_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
__A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , )
_A = labels['''input_ids''']
return model_inputs
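# Sketch of the round trip this wrapper implements, assuming the public
# RagTokenizer name for the class above (this row stores it under a placeholder):
from transformers import RagTokenizer

tok = RagTokenizer.from_pretrained('facebook/rag-token-base')
tok.save_pretrained('./rag_tok')
# save_pretrained writes ./rag_tok/question_encoder_tokenizer/ and
# ./rag_tok/generator_tokenizer/, exactly the two subfolders used above
batch = tok(['who wrote hamlet?'], return_tensors='pt')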
| 62 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = 'deit'
def __init__( self: Any , __A: Union[str, Any]=7_68 , __A: Optional[Any]=12 , __A: Union[str, Any]=12 , __A: Optional[int]=30_72 , __A: Optional[int]="gelu" , __A: Optional[Any]=0.0 , __A: List[Any]=0.0 , __A: int=0.02 , __A: List[str]=1e-12 , __A: Optional[int]=2_24 , __A: Tuple=16 , __A: List[Any]=3 , __A: List[str]=True , __A: Any=16 , **__A: Union[str, Any] , ) -> int:
super().__init__(**__A )
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = image_size
_A = patch_size
_A = num_channels
_A = qkv_bias
_A = encoder_stride
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = version.parse("1.11" )
@property
def __A ( self: int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __A ( self: Any ) -> float:
return 1e-4
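# Minimal sketch of the config defined above, assuming its public name
# DeiTConfig (this row stores the class under a placeholder name):
from transformers import DeiTConfig

config = DeiTConfig(image_size=224, patch_size=16, num_channels=3)
assert config.model_type == 'deit'
# the ONNX companion above declares dynamic batch/channel/height/width axes
# for pixel_values and validates exported outputs with atol 1e-4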
| 701 |
from __future__ import annotations
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ): # noqa: E741
'''simple docstring'''
while r - l > 1:
_A = (l + r) // 2
if v[m] >= key:
_A = m
else:
_A = m # noqa: E741
return r
def __A ( _lowercase ):
'''simple docstring'''
if len(_lowercase ) == 0:
return 0
_A = [0] * len(_lowercase )
_A = 1
_A = v[0]
for i in range(1 , len(_lowercase ) ):
if v[i] < tail[0]:
_A = v[i]
elif v[i] > tail[length - 1]:
_A = v[i]
length += 1
else:
_A = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
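# Worked trace of the O(n log n) longest-increasing-subsequence routine above
# (the functions are stored under placeholder names in this row):
#   v = [2, 5, 3, 7, 11, 8, 10, 13, 6]
#   tails: [2] -> [2,5] -> [2,3] -> [2,3,7] -> [2,3,7,11] -> [2,3,7,8]
#          -> [2,3,7,8,10] -> [2,3,7,8,10,13] -> [2,3,6,8,10,13]
#   length = 6, e.g. the subsequence 2, 3, 7, 8, 10, 13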
| 62 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
__A = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width( height , width , scale_factor=8 ):
'''simple docstring'''
new_height = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
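# e.g. height = width = 768 and scale_factor = 8:
#   768 // 8**2 = 12 exactly, so the function returns (12 * 8, 12 * 8) = (96, 96),
#   the latent resolution that the MoVQ decoder later upsamples back to 768x768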
def prepare_image( pil_image , w=5_12 , h=5_12 ):
'''simple docstring'''
pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
arr = np.array(pil_image.convert('''RGB''' ) )
arr = arr.astype(np.float32 ) / 1_27.5 - 1
arr = np.transpose(arr , [2, 0, 1] )
image = torch.from_numpy(arr ).unsqueeze(0 )
return image
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
def __init__( self: int , __A: Union[str, Any] , __A: int , __A: Optional[int] , ) -> int:
super().__init__()
self.register_modules(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , movq=__UpperCamelCase , )
_A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self: Dict , __A: Optional[Any] , __A: Optional[Any] , __A: int ) -> str:
_A = min(int(num_inference_steps * strength ) , __UpperCamelCase )
_A = max(num_inference_steps - init_timestep , 0 )
_A = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self: List[str] , __A: str , __A: List[str] , __A: Optional[Any] , __A: Dict , __A: int , __A: Union[str, Any] , __A: Optional[int]=None ) -> Any:
if not isinstance(__UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__UpperCamelCase )}""" )
_A = image.to(device=__UpperCamelCase , dtype=__UpperCamelCase )
_A = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_A = image
else:
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
_A = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__UpperCamelCase )
]
_A = torch.cat(__UpperCamelCase , dim=0 )
else:
_A = self.movq.encode(__UpperCamelCase ).latent_dist.sample(__UpperCamelCase )
_A = self.movq.config.scaling_factor * init_latents
_A = torch.cat([init_latents] , dim=0 )
_A = init_latents.shape
_A = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
# get latents
_A = self.scheduler.add_noise(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_A = init_latents
return latents
def __A ( self: List[Any] , __A: List[str]=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_A = torch.device(f"""cuda:{gpu_id}""" )
_A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase , __UpperCamelCase )
def __A ( self: int , __A: str=0 ) -> List[Any]:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_A = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A ,_A = cpu_offload_with_hook(__UpperCamelCase , __UpperCamelCase , prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
_A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self: Dict ) -> Dict:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self: Dict , __A: str , __A: List[Any] , __A: List[Any] , __A: Optional[int] = 5_12 , __A: Union[str, Any] = 5_12 , __A: List[str] = 1_00 , __A: Any = 4.0 , __A: Dict = 0.3 , __A: Optional[int] = 1 , __A: Union[str, Any] = None , __A: Dict = "pil" , __A: Dict = True , ) -> List[Any]:
_A = self._execution_device
_A = guidance_scale > 1.0
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_A = torch.cat(__UpperCamelCase , dim=0 )
_A = image_embeds.shape[0]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_A = torch.cat(__UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
_A = image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
_A = negative_image_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
_A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCamelCase )
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
_A = [image]
if not all(isinstance(__UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(__UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_A = torch.cat([prepare_image(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for i in image] , dim=0 )
_A = image.to(dtype=image_embeds.dtype , device=__UpperCamelCase )
_A = self.movq.encode(__UpperCamelCase )['''latents''']
_A = latents.repeat_interleave(__UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(__UpperCamelCase , device=__UpperCamelCase )
_A ,_A = self.get_timesteps(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_A = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_A ,_A = downscale_height_and_width(__UpperCamelCase , __UpperCamelCase , self.movq_scale_factor )
_A = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , image_embeds.dtype , __UpperCamelCase , __UpperCamelCase )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = {'''image_embeds''': image_embeds}
_A = self.unet(
sample=__UpperCamelCase , timestep=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , added_cond_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
if do_classifier_free_guidance:
_A ,_A = noise_pred.split(latents.shape[1] , dim=1 )
_A ,_A = noise_pred.chunk(2 )
_A ,_A = variance_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A ,_A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase , )[0]
# post-processing
_A = self.movq.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_A = image * 0.5 + 0.5
_A = image.clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
| 702 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "sequence-classification"
def __init__( self: str , __A: Union[str, Any] ) -> List[str]:
if type(__A ) == dict:
_A = Namespace(**__A )
_A = glue_output_modes[hparams.task]
_A = glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]:
return self.model(**__A )
def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]:
_A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_A = self(**__A )
_A = outputs[0]
_A = self.trainer.lr_schedulers[0]['''scheduler''']
_A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __A ( self: List[str] ) -> Dict:
_A = self.hparams
_A = processors[args.task]()
_A = processor.get_labels()
for mode in ["train", "dev"]:
_A = self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
_A = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
_A = convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __A )
torch.save(__A , __A )
def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader:
_A = '''dev''' if mode == '''test''' else mode
_A = self._feature_file(__A )
logger.info('''Loading features from cached file %s''' , __A )
_A = torch.load(__A )
_A = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_A = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_A = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def __A ( self: List[str] , __A: str , __A: Tuple ) -> str:
_A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_A = self(**__A )
_A ,_A = outputs[:2]
_A = logits.detach().cpu().numpy()
_A = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __A ( self: str , __A: Dict ) -> tuple:
_A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
_A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_A = np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_A = np.squeeze(__A )
_A = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
_A = [[] for _ in range(out_label_ids.shape[0] )]
_A = [[] for _ in range(out_label_ids.shape[0] )]
_A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_A = dict(results.items() )
_A = results
return ret, preds_list, out_label_list
def __A ( self: Any , __A: list ) -> dict:
_A ,_A ,_A = self._eval_end(__A )
_A = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __A ( self: int , __A: Union[str, Any] ) -> dict:
_A ,_A ,_A = self._eval_end(__A )
_A = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=__A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this; the default of 0 means none.''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def main():
'''simple docstring'''
parser = argparse.ArgumentParser()
add_generic_args(parser , os.getcwd() )
parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
args = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
args.output_dir = os.path.join(
'''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
os.makedirs(args.output_dir )
model = GLUETransformer(args )
trainer = generic_train(model , args )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=True ) )
model = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(model )
if __name__ == "__main__":
main()
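# Hedged launch sketch for the entry point above. Flags --task/--gpus/
# --max_seq_length are defined here; the remaining ones come from
# add_generic_args in lightning_base, and the script name is illustrative:
#
#   python run_glue.py \
#       --model_name_or_path bert-base-cased \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --output_dir ./results/mrpc \
#       --do_train --do_predict \
#       --gpus 1 --max_seq_length 128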
| 62 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 703 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def __A ( _lowercase = "" ):
'''simple docstring'''
_A = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
_A = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' )
_A = soup.find_all('''td''' , attrs='''titleColumn''' )
_A = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_lowercase , _lowercase )
}
def __A ( _lowercase = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
_A = get_imdb_top_aaa_movies()
with open(_lowercase , '''w''' , newline='''''' ) as out_file:
_A = csv.writer(_lowercase )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
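# Illustrative shape of the CSV produced above (titles/ratings are examples only):
#
#   Movie title,IMDb rating
#   The Shawshank Redemption,9.2
#   The Godfather,9.2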
| 62 | 0 |
__A = 8.314_462 # Unit - J mol-1 K-1
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
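# Worked example for the first helper above (pressure from moles, kelvin, volume):
#   n = 2 mol, T = 300 K, V = 0.05 m^3
#   P = n * T * R / V = 2 * 300 * 8.314462 / 0.05 ≈ 99_773.5 Pa (about 0.98 atm)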
| 704 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = BlenderbotSmallTokenizer
A_ = False
def __A ( self: List[str] ) -> int:
super().setUp()
_A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__A ) )
def __A ( self: str , **__A: Optional[Any] ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: str , __A: List[str] ) -> int:
_A = '''adapt act apte'''
_A = '''adapt act apte'''
return input_text, output_text
def __A ( self: Union[str, Any] ) -> Any:
_A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A = '''adapt act apte'''
_A = ['''adapt''', '''act''', '''ap@@''', '''te''']
_A = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
_A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_A = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
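# BPE trace for "apte" under the toy merges above:
#   a p t e</w>  --"a p"-->  ap t e</w>  --"t e</w>"-->  ap te</w>
# no merge covers the pair (ap, te</w>), so the word is emitted as the
# subwords "ap@@" and "te", matching the expected tokens asserted above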
def __A ( self: Any ) -> List[str]:
_A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
_A = '''I am a small frog.'''
_A = tok([src_text] , padding=__A , truncation=__A )['''input_ids''']
_A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __A ( self: Any ) -> int:
_A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
_A = '''I am a small frog .'''
_A = '''.'''
_A = tok(__A )['''input_ids''']
_A = tok(__A )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 62 | 0 |
from maths.prime_factors import prime_factors
def __A ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , int ):
_A = f"""Input value of [number={_lowercase}] must be an integer"""
raise TypeError(_A )
if _lowercase < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(_lowercase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
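# Worked check for the routine above. Assuming maths.prime_factors returns
# factors WITH multiplicity, this computes (-1)**Omega(n) -- the Liouville
# function -- rather than the Moebius function proper:
#   n = 12 -> [2, 2, 3], odd count  -> -1
#   n = 10 -> [2, 5],    even count ->  1
#   n = 4  -> [2, 2],    even count ->  1   (whereas mu(4) = 0)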
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "roberta"
def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
@property
def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_A = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
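# Minimal sketch of the config above under its public name RobertaConfig
# (this row stores the class under a placeholder):
from transformers import RobertaConfig, RobertaModel

config = RobertaConfig(vocab_size=50_265, hidden_size=768, num_hidden_layers=12)
model = RobertaModel(config)  # randomly initialised; no pretrained weights loaded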
| 62 | 0 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines( lines ):
'''simple docstring'''
filtered_lines = []
for line in lines:
line = re.sub(R'''#.*''' , '''''' , line ) # remove comments
if line:
filtered_lines.append(line )
full_str = '''\n'''.join(filtered_lines )
# Make a hash from all this code
full_bytes = full_str.encode('''utf-8''' )
return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
__A = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__A = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 706 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__A = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]:
super().__init__(*__A , **__A )
_A = eval_examples
_A = post_process_function
_A = quant_trainer_args
_A = 1_28 # default number of calibration samples
def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
_A = calib_dataset if calib_dataset is not None else self.calib_dataset
_A = self._remove_unused_columns(__A , description='''Calibration''' )
return DataLoader(
__A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , )
def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]:
_A = self.train_dataset if calib_dataset is None else calib_dataset
_A = self.get_calib_dataloader(__A )
_A = self.model
quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A )
model.eval()
quant_trainer.enable_calibration(__A )
logger.info('''***** Running calibration *****''' )
logger.info(f""" Num examples = {self.calib_num}""" )
logger.info(f""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(__A ):
# Prediction step
_A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__A , self.quant_trainer_args )
_A = model
def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int:
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(__A )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_A = self.post_process_function(__A , __A , output.predictions )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
self.log(__A )
else:
_A = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A )
return metrics
def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]:
_A = self.get_test_dataloader(__A )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(__A , __A , output.predictions , '''predict''' )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A )
def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]:
_A = self.eval_dataset
_A = self.get_eval_dataloader(__A )
_A = next(iter(__A ) )
# saving device - to make it consistent
_A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
_A = tuple(v.to(__A ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
_A = True
_A = self.model.to(__A )
model.eval()
model.float()
_A = model.module if hasattr(__A , '''module''' ) else model
quant_trainer.configure_model(__A , self.quant_trainer_args )
_A = os.path.join(__A , '''model.onnx''' )
logger.info(f"""exporting model to {output_model_file}""" )
_A = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__A , )
logger.info('''onnx export finished''' )
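# Hedged usage sketch of the calibration/export flow above. Upstream names the
# methods calibrate/evaluate/predict/save_onnx; this row stores them (and the
# class) under placeholders, so the snippet below assumes the upstream API:
#
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args, train_dataset=train_dataset,
#       eval_examples=eval_examples, post_process_function=post_processing_function,
#       quant_trainer_args=quant_trainer_args,
#   )
#   trainer.calibrate()            # streams calib_num samples through the model
#   trainer.train()
#   trainer.save_onnx('./out')     # exports model.onnx with dynamic batch/seq axes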
| 62 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadV1Processor,
SquadV2Processor,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import math
import tensorflow as tf
from packaging import version
def _gelu( x ):
'''simple docstring'''
x = tf.convert_to_tensor(x )
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def _gelu_new( x ):
'''simple docstring'''
x = tf.convert_to_tensor(x )
pi = tf.cast(math.pi , x.dtype )
coeff = tf.cast(0.04_47_15 , x.dtype )
cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
return x * cdf
def mish( x ):
'''simple docstring'''
x = tf.convert_to_tensor(x )
return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast( x ):
'''simple docstring'''
x = tf.convert_to_tensor(x )
coeffa = tf.cast(0.04_47_15 , x.dtype )
coeffb = tf.cast(0.79_78_84_56_08 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def quick_gelu( x ):
'''simple docstring'''
x = tf.convert_to_tensor(x )
coeff = tf.cast(1.7_02 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def gelu_aa( x ):
'''simple docstring'''
return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu( x , axis=-1 ):
'''simple docstring'''
a ,b = tf.split(x , 2 , axis=axis )
return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
def approximate_gelu_wrap( x ):
'''simple docstring'''
return tf.keras.activations.gelu(x , approximate=True )
gelu = tf.keras.activations.gelu
gelu_new = approximate_gelu_wrap
else:
gelu = _gelu
gelu_new = _gelu_new
ACTaFN = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def get_tf_activation( activation_string ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 708 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq , size ):
'''simple docstring'''
it = iter(seq )
while True:
chunk = tuple(itertools.islice(it , size ) )
if not chunk:
return
yield chunk
def prepare_input( dirty ):
'''simple docstring'''
dirty = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
clean = ''''''
if len(dirty ) < 2:
return dirty
for i in range(len(dirty ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(clean ) & 1:
clean += "X"
return clean
def generate_table( key ):
'''simple docstring'''
alphabet = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
table = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(char )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(char )
return table
def encode( plaintext , key ):
'''simple docstring'''
table = generate_table(key )
plaintext = prepare_input(plaintext )
ciphertext = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, charb in chunker(plaintext , 2 ):
rowa ,cola = divmod(table.index(chara ) , 5 )
rowb ,colb = divmod(table.index(charb ) , 5 )
if rowa == rowb:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowb * 5 + (colb + 1) % 5]
elif cola == colb:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowb + 1) % 5) * 5 + colb]
else: # rectangle
ciphertext += table[rowa * 5 + colb]
ciphertext += table[rowb * 5 + cola]
return ciphertext
def decode( ciphertext , key ):
'''simple docstring'''
table = generate_table(key )
plaintext = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, charb in chunker(ciphertext , 2 ):
rowa ,cola = divmod(table.index(chara ) , 5 )
rowb ,colb = divmod(table.index(charb ) , 5 )
if rowa == rowb:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowb * 5 + (colb - 1) % 5]
elif cola == colb:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowb - 1) % 5) * 5 + colb]
else: # rectangle
plaintext += table[rowa * 5 + colb]
plaintext += table[rowb * 5 + cola]
return plaintext
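# Worked example for the cipher above. generate_table('monarchy') lays out
#   M O N A R
#   C H Y B D
#   E F G I K
#   L P Q S T
#   U V W X Z
# (J is folded into I by the reduced alphabet). 'HI' occupies (1,1) and (2,3),
# a rectangle, so encode('hi', 'monarchy') == 'BF' and
# decode('BF', 'monarchy') == 'HI'; prepare_input pads doubled letters and odd
# lengths, e.g. 'ball' -> 'BALXLX'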
| 62 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def __A ( _lowercase : str , _lowercase : List[str] = 16 , _lowercase : Union[str, Any] = "bert-base-cased" ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained(_lowercase )
_A = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(examples : str ):
# max_length=None => use the model max length (it's actually the default)
_A = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_A = _A.map(
tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_A = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
_A = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
_A = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def __A ( _lowercase : Union[str, Any] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : Tuple ):
'''simple docstring'''
model.eval()
_A = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_A = model(**_lowercase )
_A = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_A ,_A = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
_A = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_A = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
_A = metric.compute()
return eval_metric["accuracy"]
def training_function( config , args ):
'''simple docstring'''
_A = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_A = config['''lr''']
_A = int(config['''num_epochs'''] )
_A = int(config['''seed'''] )
_A = int(config['''batch_size'''] )
_A = args.model_name_or_path
set_seed(_lowercase )
_A ,_A = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_A = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
_A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_A = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
_A = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
_A = 1
_A = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_A = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
_A = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_A ,_A ,_A ,_A ,_A = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
_A = 0
# We also need to keep track of the stating epoch so files are named properly
_A = 0
_A = evaluate.load('''glue''' , '''mrpc''' )
_A = num_epochs
if args.partial_train_epoch is not None:
_A = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_A = args.resume_from_checkpoint.split('''epoch_''' )[1]
_A = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_A = int(_lowercase ) + 1
_A = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.print('''resumed checkpoint performance:''' , _lowercase )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f:
_A = json.load(_lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_A = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
_A = model(**_lowercase )
_A = outputs.loss
_A = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_A = f"""epoch_{epoch}"""
_A = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
_A = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
_A = accuracy
_A = lr_scheduler.get_lr()[0]
_A = optimizer.param_groups[0]['''lr''']
_A = epoch
_A = overall_step
accelerator.print(f"""epoch {epoch}:""" , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def main():
'''simple docstring'''
parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
parser.add_argument(
'''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=str , default=None , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=int , default=None , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=int , default=2 , help='''Number of train epochs.''' , )
args = parser.parse_args()
config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(config , args )
if __name__ == "__main__":
main()
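# Hedged launch sketch for the script above (the filename is illustrative;
# flags match the argument parser defined in main):
#
#   accelerate launch checkpointing_test.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./ckpts
#
#   # resume from a saved epoch folder; the script then re-validates the
#   # metrics recorded in state_<epoch>.json against the restored state:
#   accelerate launch checkpointing_test.py \
#       --resume_from_checkpoint ./ckpts/epoch_0 --output_dir ./ckpts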
| 709 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
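    # Quick demonstration (values chosen for illustration): two 4-ohm resistors
    # combine to 2 ohms in parallel and 8 ohms in series.
    print(resistor_parallel([4.0, 4.0]))  # 2.0
    print(resistor_series([4.0, 4.0]))  # 8.0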
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Order vertices by depth-first-search finish time (first pass of Kosaraju's algorithm)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS for finish order, then DFS over the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
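

if __name__ == "__main__":
    # Demonstration added for illustration: in test_graph_2 the cycles
    # 0 -> 1 -> 2 -> 0 and 3 -> 4 -> 5 -> 3 form the two strongly connected
    # components (component order may vary).
    print(strongly_connected_components(test_graph_2))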
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon search results for `product` and return them as a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": (
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
        ),
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        float(product_mrp.strip("₹").replace(",", ""))
                        - float(product_price.strip("₹").replace(",", ""))
                    )
                    / float(product_mrp.strip("₹").replace(",", ""))
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # skip entries that are missing the expected markup entirely
            continue
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[
        data_frame["Current Price of the product"] == "", "Current Price of the product"
    ] = " "
    data_frame.loc[data_frame["Discount"] == "", "Discount"] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack using the global DP table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
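

# The table above follows the standard 0/1 knapsack recurrence:
#   dp[i][w] = dp[i-1][w]                                         if wt[i-1] > w
#   dp[i][w] = max(dp[i-1][w], val[i-1] + dp[i-1][w - wt[i-1]])   otherwise
# For example, with wt=[4, 3, 2, 3] and val=[3, 2, 4, 4] at capacity 6 the
# optimum is 8 (items 3 and 4), as asserted in the __main__ block below.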
def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    """Backtrack through the DP table, adding chosen item indices to `optimal_set`."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
__A = [3, 2, 4, 4]
__A = [4, 3, 2, 3]
__A = 4
__A = 6
__A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
__A , __A = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
__A , __A = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def ta_base_tokenizer(self) -> ByTaTokenizer:
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue  # skip ids that do not decode to valid text
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    # no pretrained max model input sizes for this byte-level tokenizer
    def test_pretrained_model_lists(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
def solution(n: int = 1_000_000) -> int:
    """Return the starting number below `n` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
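

# Worked example (added for illustration): the chain starting at 13 is
#   13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
# i.e. ten terms, the example chain from Project Euler problem 14.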
if __name__ == "__main__":
print(solution(int(input().strip())))
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the accepted video inputs into a batch: a list of lists of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was obfuscated; it is assumed here, since the
    # structure matches the VideoMAE-style video image processor in transformers.
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
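

# Minimal usage sketch (illustrative only; the class name above is assumed):
#   import numpy as np
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   inputs = processor(video, return_tensors="np")
#   # inputs["pixel_values"].shape == (1, 8, 3, 224, 224)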
def text_justification(word: str, max_width: int) -> list:
    """
    Format a sentence into lines that are fully justified to `max_width`.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
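    # Illustrative example (the classic fully-justified layout for width 16):
    print(text_justification("This is an example of text justification.", 16))
    # -> ['This    is    an', 'example  of text', 'justification.  ']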
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model and copy `value` into the matching parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""")
def should_ignore(name, ignore_keys):
    """Return True if `name` matches any pattern in `ignore_keys` (prefix, wildcard, or substring)."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
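

# Illustrative behaviour (added examples, not from the original file):
#   should_ignore("decoder.foo", ["decoder.*"])   -> True  (prefix match)
#   should_ignore("x.a.y.b.z", ["a.*.b"])         -> True  (both wildcard parts occur)
#   should_ignore("encoder.bias", ["bias"])       -> True  (substring match)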
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every original checkpoint tensor onto the HF EnCodec model."""
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"""Unsupported model: {model_name}""")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"""{name} was ignored""")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Build an EncodecConfig for `model_name`, load the original weights, and save the HF model."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"""Unknown model name: {model_name}""")

    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
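
# Example invocation (illustrative; file names are placeholders):
#   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz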
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file_abs
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file_rel
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
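

# To run this module's tests (illustrative invocation; the file name is assumed):
#   pytest test_file_utils.py -q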
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
def __A ( self: List[str] ) -> List[Any]:
_A = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __A ( self: Any ) -> Union[str, Any]:
_A = self.model_tester.prepare_config_and_inputs()
_A = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__A , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' )
def __A ( self: Dict ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def __A ( self: List[str] ) -> int:
_A = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_A = self.model_tester.prepare_config_and_inputs()
_A = config_and_inputs[0]
_A = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
_A = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__A ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__A ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__A ),
}
for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
_A = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_A = torch.ones(
config.num_decoder_layers , config.num_heads , device=__A )
_A = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__A , return_dict_in_generate=__A , **__A , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_A = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __A ( self: List[Any] ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __A ( self: Any ) -> List[str]:
_A = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__A ).to(__A )
_A = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__A , legacy=__A )
_A = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_A = tokenizer(__A , return_tensors='''pt''' , padding=__A ).input_ids
# fmt: off
_A = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__A , __A )
_A = model.generate(input_ids.to(__A ) )
_A = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_A = tokenizer.batch_decode(__A )
self.assertEqual(__A , __A )
| 715 |
import math
def sieve(n):
    '''Segmented sieve of Eratosthenes: return every prime up to ``n``.'''
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
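# A hedged sanity check for the segmented sieve above: compare it against a
# plain sieve on a small range (the helper name `simple_sieve` is illustrative).
def simple_sieve(n):
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(n**0.5) + 1):
        if flags[p]:
            for m in range(p * p, n + 1, p):
                flags[m] = False
    return [i for i, ok in enumerate(flags) if ok]
assert sieve(10**4) == simple_sieve(10**4)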
| 62 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('T')
class SegmentTree(Generic[T]):
    '''Iterative (bottom-up) segment tree over any associative combine function.'''
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # internal nodes first, then leaves
        self.fn = fnc
        self.build()
    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        '''Exhaustively check every (i, j) segment against functools.reduce.'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
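# A hedged standalone usage sketch of the SegmentTree above on a fresh array:
st = SegmentTree([5, 1, 4, 2], min)
assert st.query(0, 3) == 1
st.update(1, 9)  # array becomes [5, 9, 4, 2]
assert st.query(0, 1) == 5
assert st.query(1, 2) == 4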
| 716 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: Tuple ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __A: Dict ) -> Tuple:
_A ,_A ,_A ,_A = hidden_states.shape
_A = jax.image.resize(
__A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: List[str] ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = None
A_ = 0.0
A_ = None
A_ = jnp.floataa
def __A ( self: Dict ) -> Dict:
_A = self.in_channels if self.out_channels is None else self.out_channels
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = nn.Dense(__A , dtype=self.dtype )
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Dropout(self.dropout_prob )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_A = None
if use_nin_shortcut:
_A = nn.Conv(
__A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]:
_A = hidden_states
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.conva(__A )
_A = self.time_emb_proj(nn.swish(__A ) )
_A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 )
_A = hidden_states + temb
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.dropout(__A , __A )
_A = self.conva(__A )
if self.conv_shortcut is not None:
_A = self.conv_shortcut(__A )
return hidden_states + residual
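# A hedged sketch of how such a Flax module is initialized and applied. The
# stand-in below mirrors the nearest-neighbor upsample block above; the class
# name and shapes are illustrative, not the library's own.
import flax.linen as nn
import jax
import jax.numpy as jnp
class Upsample2D(nn.Module):
    out_channels: int
    def setup(self):
        self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)))
    def __call__(self, x):
        batch, height, width, channels = x.shape
        x = jax.image.resize(x, shape=(batch, height * 2, width * 2, channels), method='nearest')
        return self.conv(x)
x = jnp.ones((1, 8, 8, 4))  # NHWC layout, as in the blocks above
module = Upsample2D(out_channels=4)
params = module.init(jax.random.PRNGKey(0), x)
print(module.apply(params, x).shape)  # (1, 16, 16, 4)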
| 62 | 0 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    '''Return True if no already-colored neighbour uses ``color``.'''
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    '''Backtracking helper: try to color vertex ``index`` and everything after it.'''
    # Base case: every vertex has been assigned a color
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    '''Return a valid vertex coloring using at most ``max_colors`` colors, else [].'''
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
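# A hedged usage example on a 5-vertex adjacency matrix; three colors suffice,
# one cannot (any edge forces two distinct colors).
small_graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(small_graph, 3))  # e.g. [0, 1, 0, 1, 0]
print(color(small_graph, 1))  # []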
| 717 |
def topological_sort(graph: dict[int, list[int]]) -> None:
    '''Kahn's algorithm: print a topological ordering of ``graph`` or report a cycle.'''
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
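# Feeding a cyclic graph exercises the `cnt != len(graph)` branch above: no
# vertex ever reaches indegree 0, so the queue starts (and stays) empty.
cyclic = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic)  # prints "Cycle exists"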
| 62 | 0 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    '''Return the optimal leaf value for the maximizing player in a full binary score tree.'''
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
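# A small worked case for the minimax above: with scores [3, 5, 2, 9] and
# height 2, the maximizer gets max(min(3, 5), min(2, 9)) = 3.
example_scores = [3, 5, 2, 9]
assert minimax(0, 0, True, example_scores, math.log(len(example_scores), 2)) == 3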
| 718 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class SCREAMING_SNAKE_CASE ( snake_case , snake_case ):
"""simple docstring"""
A_ = 1
@register_to_config
def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__A )
# standard deviation of the initial noise distribution
_A = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_A = 4
# running values
_A = []
def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int:
_A = num_inference_steps
_A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_A = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_A = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
_A = torch.sin(steps * math.pi / 2 ) ** 2
_A = (1.0 - self.betas**2) ** 0.5
_A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
_A = timesteps.to(__A )
_A = []
def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
_A = (self.timesteps == timestep).nonzero().item()
_A = timestep_index + 1
_A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__A )
if len(self.ets ) == 1:
_A = self.ets[-1]
elif len(self.ets ) == 2:
_A = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_A = self._get_prev_sample(__A , __A , __A , __A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__A )
def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor:
return sample
def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]:
_A = self.alphas[timestep_index]
_A = self.betas[timestep_index]
_A = self.alphas[prev_timestep_index]
_A = self.betas[prev_timestep_index]
_A = (sample - sigma * ets) / max(__A , 1e-8 )
_A = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[str] ) -> Dict:
return self.config.num_train_timesteps
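# The len(self.ets) branches in `step` above are the Adams-Bashforth multi-step
# coefficients: (3, -1)/2, (23, -16, 5)/12 and (55, -59, 37, -9)/24. As a hedged
# aside (pure NumPy, not part of the scheduler), they can be rederived by
# integrating the Lagrange basis polynomials on the nodes 0, -1, ..., -(k-1)
# over one unit step:
import numpy as np
def adams_bashforth_coeffs(k):
    nodes = [-j for j in range(k)]
    coeffs = []
    for tj in nodes:
        others = [t for t in nodes if t != tj]
        # Lagrange basis polynomial that is 1 at tj and 0 at the other nodes
        basis = np.poly(others) / np.prod([tj - t for t in others])
        antiderivative = np.polyint(basis)
        coeffs.append(np.polyval(antiderivative, 1.0) - np.polyval(antiderivative, 0.0))
    return coeffs
print(adams_bashforth_coeffs(4))  # ~ [2.2917, -2.4583, 1.5417, -0.375] == [55, -59, 37, -9] / 24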
| 62 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 719 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    '''Count the simple paths from (row, col) to the bottom-right cell, treating 1-cells as walls.'''
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    import doctest
    doctest.testmod()
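# A hedged usage example: with the center cell walled off, exactly two simple
# paths connect the corners (clockwise and counter-clockwise around the wall).
maze = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(maze, 0, 0, set()))  # 2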
| 62 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: List[Any] ) -> int:
_A = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_A = dict(zip(_a , range(len(_a ) ) ) )
_A = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_A = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_A = tempfile.mkdtemp()
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , _a )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
# load decoder from hub
_A = '''hf-internal-testing/ngram-beam-search-decoder'''
def __A ( self: Optional[Any] , **__A: str ) -> Tuple:
_A = self.add_kwargs_tokens_map.copy()
kwargs.update(_a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_a )
def __A ( self: Union[str, Any] , **__A: List[str] ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_a )
def __A ( self: Any , **__A: List[str] ) -> Union[str, Any]:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_a )
def __A ( self: Dict ) -> str:
shutil.rmtree(self.tmpdirname )
def __A ( self: int ) -> int:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _a )
def __A ( self: List[Any] ) -> List[Any]:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __A ( self: Dict ) -> Dict:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_a , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __A ( self: int ) -> str:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
_A = floats_list((3, 10_00) )
_A = feature_extractor(_a , return_tensors='''np''' )
_A = processor(_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self: Dict ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
_A = '''This is a test string'''
_A = processor(text=_a )
_A = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self: Dict , __A: Optional[Any]=(2, 10, 16) , __A: str=77 ) -> str:
np.random.seed(_a )
return np.random.rand(*_a )
def __A ( self: List[str] ) -> List[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(_a )
_A = decoder.decode_beams(_a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __A ( self: Dict , __A: List[str] ) -> Union[str, Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(_a )
else:
with get_context(_a ).Pool() as pool:
_A = processor.batch_decode(_a , _a )
_A = list(_a )
with get_context('''fork''' ).Pool() as p:
_A = decoder.decode_beams_batch(_a , _a )
_A ,_A ,_A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_a , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(_a , decoded_processor.logit_score )
self.assertListEqual(_a , decoded_processor.lm_score )
def __A ( self: int ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
_a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , )
_A = decoded_processor_out.text
_A = list(_a )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
_a , _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_a , _a )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _a )
self.assertTrue(np.array_equal(_a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _a , atol=1e-3 ) )
self.assertTrue(np.array_equal(_a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , _a , atol=1e-3 ) )
def __A ( self: Dict ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
_a , alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , )
_A = decoded_processor_out.text
_A = list(_a )
decoder.reset_params(
alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
_a , _a , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_a , _a )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _a )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _a )
def __A ( self: Any ) -> List[str]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(_a )
_A = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_a , _a )
def __A ( self: List[Any] ) -> Tuple:
_A = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_A = WavaVecaProcessorWithLM.from_pretrained(_a )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(_a )
_A = os.listdir(_a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_a , _a )
def __A ( self: int ) -> Dict:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(_a , return_tensors='''np''' )
_A = processor_auto(_a , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(_a )
_A = processor_auto.batch_decode(_a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __A ( self: List[Any] ) -> List[str]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __A ( __A: List[Any] , __A: Any ) -> List[Any]:
_A = [d[key] for d in offsets]
return retrieved_list
def __A ( self: List[Any] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()[0]
_A = processor.decode(_a , output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_a , _a ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __A ( self: int ) -> Any:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()
_A = processor.batch_decode(_a , output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_a , _a ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_a , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __A ( self: List[str] ) -> Tuple:
import torch
_A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_a )
_A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(_a )
_A = next(_a )
_A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_A = model(_a ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=_a )
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_a , '''word''' ) ) , _a )
self.assertEqual(''' '''.join(self.get_from_offsets(_a , '''word''' ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(_a , '''start_time''' ) )
_A = torch.tensor(self.get_from_offsets(_a , '''end_time''' ) )
# fmt: off
_A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) )
self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) )
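# A hedged sketch of the decode-with-pool pattern the tests above exercise. The
# checkpoint id matches the integration test; the random logits are only a
# placeholder (their vocab axis must match the processor's alphabet to decode
# meaningfully).
import numpy as np
from multiprocessing import get_context
from transformers import Wav2Vec2ProcessorWithLM
processor = Wav2Vec2ProcessorWithLM.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
logits = np.log(np.random.rand(2, 100, 32).astype(np.float32))  # (batch, time, vocab)
# As noted above, the pool must be created *after* the processor so the LM is
# visible to the worker processes.
with get_context('fork').Pool() as pool:
    transcriptions = processor.batch_decode(logits, pool)
print(transcriptions.text)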
| 720 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__A = NewType('DataClass', Any)
__A = NewType('DataClassType', Any)
def __A ( _lowercase ):
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __A ( _lowercase ):
'''simple docstring'''
_A = {str(_lowercase ): choice for choice in choices}
return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase )
def __A ( *,
_lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_A = {}
if aliases is not None:
_A = aliases
if help is not None:
_A = help
return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = 42
def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_A = ArgumentDefaultsHelpFormatter
super().__init__(**__A )
if dataclasses.is_dataclass(__A ):
_A = [dataclass_types]
_A = list(__A )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__A )
@staticmethod
def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str:
_A = f"""--{field.name}"""
_A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __A ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
_A = kwargs.pop('''aliases''' , [] )
if isinstance(__A , __A ):
_A = [aliases]
_A = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__A ) not in field.type.__args__:
# filter `str` in Union
_A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_A = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_A = (
field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1]
)
_A = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_A = {}
if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )):
if origin_type is Literal:
_A = field.type.__args__
else:
_A = [x.value for x in field.type]
_A = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
_A = field.default
else:
_A = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_A = copy(__A )
# Hack because type=bool in argparse does not behave as we want.
_A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_A = default
# This tells argparse we accept 0 or 1 value after --field_name
_A = '''?'''
# This is the value that will get picked if we do --field_name (without value)
_A = True
elif isclass(__A ) and issubclass(__A , __A ):
_A = field.type.__args__[0]
_A = '''+'''
if field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
elif field.default is dataclasses.MISSING:
_A = True
else:
_A = field.type
if field.default is not dataclasses.MISSING:
_A = field.default
elif field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
else:
_A = True
parser.add_argument(__A , *__A , **__A )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_A = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A )
def __A ( self: Dict , __A: DataClassType ) -> List[Any]:
if hasattr(__A , '''_argument_group_name''' ):
_A = self.add_argument_group(dtype._argument_group_name )
else:
_A = self
try:
_A = get_type_hints(__A )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ):
_A = '''.'''.join(map(__A , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__A ):
if not field.init:
continue
_A = type_hints[field.name]
self._parse_dataclass_field(__A , __A )
def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_A = []
if args_filename:
args_files.append(Path(__A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_A = ArgumentParser()
args_file_parser.add_argument(__A , type=__A , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
_A ,_A = args_file_parser.parse_known_args(args=__A )
_A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A )
if cmd_args_file_paths:
args_files.extend([Path(__A ) for p in cmd_args_file_paths] )
_A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_A = file_args + args if args is not None else file_args + sys.argv[1:]
_A ,_A = self.parse_known_args(args=__A )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in vars(__A ).items() if k in keys}
for k in keys:
delattr(__A , __A )
_A = dtype(**__A )
outputs.append(__A )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = set(args.keys() )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_A = dtype(**__A )
outputs.append(__A )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" )
return tuple(__A )
def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file:
_A = json.loads(open_json_file.read() )
_A = self.parse_dict(__A , allow_extra_keys=__A )
return tuple(__A )
def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A )
return tuple(__A )
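# A hedged usage sketch, assuming the public `transformers.HfArgumentParser`
# API this file implements; the dataclass and argv below are made up.
from dataclasses import dataclass, field
from transformers import HfArgumentParser
@dataclass
class TrainingArgs:
    learning_rate: float = field(default=3e-4, metadata={'help': 'Peak learning rate.'})
    fp16: bool = field(default=False, metadata={'help': 'Enable mixed precision.'})
parser = HfArgumentParser(TrainingArgs)
(train_args,) = parser.parse_args_into_dataclasses(args=['--learning_rate', '1e-4', '--fp16'])
print(train_args.learning_rate, train_args.fp16)  # 0.0001 True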
| 62 | 0 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    '''Return the first non-negative int found among ``env_keys``, else ``default``.'''
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
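# A hedged usage example (the environment variable names are made up):
os.environ['MY_DEBUG'] = 'yes'
os.environ['MY_WORLD_SIZE'] = '4'
assert parse_flag_from_env('MY_DEBUG') is True
assert get_int_from_env(['MY_WORLD_SIZE', 'WORLD_SIZE'], 1) == 4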
| 721 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str:
_A = parent
_A = batch_size
_A = is_training
_A = use_auxiliary_loss
_A = num_queries
_A = num_channels
_A = min_size
_A = max_size
_A = num_labels
_A = mask_feature_size
def __A ( self: Dict ) -> Optional[int]:
_A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__A )
_A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A )
_A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5
).float()
_A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long()
_A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __A ( self: Optional[Any] ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __A ( self: Dict ) -> Tuple:
_A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs()
_A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int:
_A = output.encoder_hidden_states
_A = output.pixel_decoder_hidden_states
_A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers )
def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any:
with torch.no_grad():
_A = MaskFormerModel(config=__A )
model.to(__A )
model.eval()
_A = model(pixel_values=__A , pixel_mask=__A )
_A = model(__A , output_hidden_states=__A )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__A , __A )
def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int:
_A = MaskFormerForInstanceSegmentation(config=__A )
model.to(__A )
model.eval()
def comm_check_on_output(__A: int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_A = model(pixel_values=__A , pixel_mask=__A )
_A = model(__A )
comm_check_on_output(__A )
_A = model(
pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A )
comm_check_on_output(__A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def __A ( self: int ) -> Tuple:
_A = MaskFormerModelTester(self )
_A = ConfigTester(self , config_class=__A , has_text_modality=__A )
def __A ( self: List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def __A ( self: Optional[Any] ) -> int:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def __A ( self: Dict ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def __A ( self: int ) -> Tuple:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def __A ( self: List[Any] ) -> Any:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def __A ( self: Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def __A ( self: int ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __A ( self: Union[str, Any] ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: List[Any] ) -> Any:
pass
def __A ( self: Dict ) -> Optional[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
@slow
def __A ( self: int ) -> Optional[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_A = MaskFormerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __A ( self: Optional[Any] ) -> Optional[int]:
_A = (self.model_tester.min_size,) * 2
_A = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__A ),
'''mask_labels''': torch.randn((2, 10, *size) , device=__A ),
'''class_labels''': torch.zeros(2 , 10 , device=__A ).long(),
}
_A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A )
_A = model(**__A )
self.assertTrue(outputs.loss is not None )
def __A ( self: Optional[Any] ) -> List[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def __A ( self: Any ) -> Tuple:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A ).to(__A )
_A = model(**__A , output_attentions=__A )
self.assertTrue(outputs.attentions is not None )
def __A ( self: Dict ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = model_class(__A )
model.to(__A )
model.train()
_A = model(__A , mask_labels=__A , class_labels=__A ).loss
loss.backward()
def __A ( self: Tuple ) -> Optional[Any]:
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = True
_A = True
_A = model_class(__A )
model.to(__A )
model.train()
_A = model(__A , mask_labels=__A , class_labels=__A )
_A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def __A ( ):
'''simple docstring'''
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self: Union[str, Any] ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __A ( self: List[Any] ) -> Any:
_A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
_A = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
_A = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
_A = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) )
def __A ( self: Dict ) -> Dict:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_A = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def __A ( self: List[Any] ) -> Dict:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_A = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def __A ( self: Optional[Any] ) -> str:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
_A = inputs['''pixel_values'''].to(__A )
_A = [el.to(__A ) for el in inputs['''mask_labels''']]
_A = [el.to(__A ) for el in inputs['''class_labels''']]
with torch.no_grad():
_A = model(**__A )
self.assertTrue(outputs.loss is not None )
| 62 | 0 |
def is_arithmetic_series( series ):
    '''simple docstring'''
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean( series ):
    '''simple docstring'''
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
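# Editor's sketch: a small sanity check for the two helpers above. The names
# is_arithmetic_series / arithmetic_mean are an editorial reconstruction from
# the references inside the function bodies, not part of the original row.
def _series_demo() -> None:
    assert is_arithmetic_series([2, 4, 6] )       # common difference 2
    assert not is_arithmetic_series([2, 4, 7] )   # 7 - 4 != 2
    assert arithmetic_mean([2, 4, 6] ) == 4.0     # (2 + 4 + 6) / 3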
| 700 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str:
_A = question_encoder
_A = generator
_A = self.question_encoder
def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict:
if os.path.isfile(__A ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__A , exist_ok=__A )
_A = os.path.join(__A , '''question_encoder_tokenizer''' )
_A = os.path.join(__A , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__A )
self.generator.save_pretrained(__A )
@classmethod
def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_A = kwargs.pop('''config''' , __A )
if config is None:
_A = RagConfig.from_pretrained(__A )
_A = AutoTokenizer.from_pretrained(
__A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
_A = AutoTokenizer.from_pretrained(
__A , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__A , generator=__A )
def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int:
return self.current_tokenizer(*__A , **__A )
def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict:
return self.generator.batch_decode(*__A , **__A )
def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple:
return self.generator.decode(*__A , **__A )
def __A ( self: Dict ) -> List[str]:
_A = self.question_encoder
def __A ( self: Union[str, Any] ) -> int:
_A = self.generator
def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding:
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __A , )
if max_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
__A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , )
_A = labels['''input_ids''']
return model_inputs
| 62 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func , ya , xa , step_size , x_end ):
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # explicit Euler predictor, then a trapezoidal corrector (Heun's method)
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
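# Editor's sketch: a self-contained check of the modified-Euler (Heun) solver
# above on dy/dx = y with y(0) = 1. The restored signature
# (ode_func, ya, xa, step_size, x_end) is an editorial reconstruction.
def _euler_modified_demo() -> float:
    approx = euler_modified(lambda x, y: y , 1.0 , 0.0 , 0.01 , 1.0 )
    return float(approx[-1] )  # ~2.7183, close to e, for this step size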
| 701 |
from __future__ import annotations
def ceil_index( v , l , r , key ):  # noqa: E741
    '''simple docstring'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length( v ):
    '''simple docstring'''
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
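# Editor's sketch: a quick check of the O(n log n) longest-increasing-
# subsequence routine above; the restored names ceil_index /
# longest_increasing_subsequence_length are an editorial assumption.
def _lis_demo() -> int:
    # [2, 5, 3, 7, 11, 8, 10, 13, 6] has an LIS of length 6, e.g. 2, 3, 7, 8, 10, 13
    return longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6] )  # -> 6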
| 62 | 0 |
def binary_xor( a , b ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
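# Editor's sketch: 25 = 0b011001 and 32 = 0b100000 once zero-padded to the
# same width, so their bitwise XOR is 0b111001 (57). The name binary_xor is
# an editorial reconstruction from the body's references.
def _binary_xor_demo() -> str:
    return binary_xor(25 , 32 )  # -> '0b111001'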
| 702 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "sequence-classification"
def __init__( self: str , __A: Union[str, Any] ) -> List[str]:
if type(__A ) == dict:
_A = Namespace(**__A )
_A = glue_output_modes[hparams.task]
_A = glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def __A ( self: Optional[Any] , **__A: Union[str, Any] ) -> Optional[int]:
return self.model(**__A )
def __A ( self: Any , __A: Union[str, Any] , __A: int ) -> Optional[Any]:
_A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_A = self(**__A )
_A = outputs[0]
_A = self.trainer.lr_schedulers[0]['''scheduler''']
_A = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __A ( self: List[str] ) -> Dict:
_A = self.hparams
_A = processors[args.task]()
_A = processor.get_labels()
for mode in ["train", "dev"]:
_A = self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
_A = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
_A = convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __A )
torch.save(__A , __A )
def __A ( self: List[str] , __A: str , __A: int , __A: bool = False ) -> DataLoader:
_A = '''dev''' if mode == '''test''' else mode
_A = self._feature_file(__A )
logger.info('''Loading features from cached file %s''' , __A )
_A = torch.load(__A )
_A = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_A = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_A = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_A = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_A = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def __A ( self: List[str] , __A: str , __A: Tuple ) -> str:
_A = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_A = self(**__A )
_A ,_A = outputs[:2]
_A = logits.detach().cpu().numpy()
_A = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __A ( self: str , __A: Dict ) -> tuple:
_A = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
_A = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_A = np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_A = np.squeeze(__A )
_A = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
_A = [[] for _ in range(out_label_ids.shape[0] )]
_A = [[] for _ in range(out_label_ids.shape[0] )]
_A = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_A = dict(results.items() )
_A = results
return ret, preds_list, out_label_list
def __A ( self: Any , __A: list ) -> dict:
_A ,_A ,_A = self._eval_end(__A )
_A = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __A ( self: int , __A: Union[str, Any] ) -> dict:
_A ,_A ,_A = self._eval_end(__A )
_A = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __A ( __A: Optional[Any] , __A: Optional[Any] ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=__A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__A , required=__A , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def __A ( ):
'''simple docstring'''
_A = argparse.ArgumentParser()
add_generic_args(_lowercase , os.getcwd() )
_A = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() )
_A = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_A = os.path.join(
'''./results''' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , )
os.makedirs(args.output_dir )
_A = GLUETransformer(_lowercase )
_A = generic_train(_lowercase , _lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_A = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_lowercase ) )
_A = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_lowercase )
if __name__ == "__main__":
main()
| 62 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(__A )
class SCREAMING_SNAKE_CASE ( __A ):
"""simple docstring"""
def __init__( self: List[str] , *__A: List[Any] , **__A: Optional[Any] ) -> Union[str, Any]:
super().__init__(*__A , **__A )
self.check_model_type(__A )
def __A ( self: Any , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: Dict ) -> List[Any]:
_A = {}, {}
if padding is not None:
_A = padding
if truncation is not None:
_A = truncation
if top_k is not None:
_A = top_k
return preprocess_params, {}, postprocess_params
def __call__( self: List[str] , __A: List[Any] , __A: Dict = None , **__A: str ) -> str:
if isinstance(__A , (Image.Image, str) ) and isinstance(__A , __A ):
_A = {'image': image, 'question': question}
else:
_A = image
_A = super().__call__(__A , **__A )
return results
def __A ( self: Dict , __A: List[Any] , __A: Dict=False , __A: int=False ) -> int:
_A = load_image(inputs['''image'''] )
_A = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=__A , truncation=__A )
_A = self.image_processor(images=__A , return_tensors=self.framework )
model_inputs.update(__A )
return model_inputs
def __A ( self: List[str] , __A: List[str] ) -> str:
_A = self.model(**__A )
return model_outputs
def __A ( self: Optional[Any] , __A: str , __A: Tuple=5 ) -> int:
if top_k > self.model.config.num_labels:
_A = self.model.config.num_labels
if self.framework == "pt":
_A = model_outputs.logits.sigmoid()[0]
_A = probs.topk(__A )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
_A = scores.tolist()
_A = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__A , __A )]
| 703 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies( url = "" ):
    '''simple docstring'''
    url = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    titles = soup.find_all('''td''' , attrs='''titleColumn''' )
    ratings = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies( filename = "IMDb_Top_250_Movies.csv" ):
    '''simple docstring'''
    movies = get_imdb_top_aaa_movies()
    with open(filename , '''w''' , newline='''''' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 62 | 0 |
from __future__ import annotations
def slowsort( sequence , start = None , end = None ):
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
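# Editor's sketch: slowsort sorts in place and is a deliberately pessimal
# "multiply and surrender" algorithm, so keep inputs tiny. The restored name
# slowsort is forced by the recursive calls in the body.
def _slowsort_demo() -> list:
    data = [5, 2, 9, 1, 7]
    slowsort(data )
    return data  # -> [1, 2, 5, 7, 9]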
| 704 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = BlenderbotSmallTokenizer
A_ = False
def __A ( self: List[str] ) -> int:
super().setUp()
_A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__A ) )
def __A ( self: str , **__A: Optional[Any] ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: str , __A: List[str] ) -> int:
_A = '''adapt act apte'''
_A = '''adapt act apte'''
return input_text, output_text
def __A ( self: Union[str, Any] ) -> Any:
_A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A = '''adapt act apte'''
_A = ['''adapt''', '''act''', '''ap@@''', '''te''']
_A = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
_A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_A = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def __A ( self: Any ) -> List[str]:
_A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
_A = '''I am a small frog.'''
_A = tok([src_text] , padding=__A , truncation=__A )['''input_ids''']
_A = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __A ( self: Any ) -> int:
_A = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
_A = '''I am a small frog .'''
_A = '''.'''
_A = tok(__A )['''input_ids''']
_A = tok(__A )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 62 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
_A = load_file(lowerCAmelCase__ )
_A = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
_A = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
_A = pipeline.text_encoder
else:
_A = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
_A = pipeline.unet
# find the target layer
_A = layer_infos.pop(0 )
while len(lowerCAmelCase__ ) > -1:
try:
_A = curr_layer.__getattr__(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
_A = layer_infos.pop(0 )
elif len(lowerCAmelCase__ ) == 0:
break
except Exception:
if len(lowerCAmelCase__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
_A = layer_infos.pop(0 )
_A = []
if "lora_down" in key:
pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
pair_keys.append(lowerCAmelCase__ )
else:
pair_keys.append(lowerCAmelCase__ )
pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
_A = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
_A = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowerCAmelCase__ , lowerCAmelCase__ ).unsqueeze(2 ).unsqueeze(3 )
else:
_A = state_dict[pair_keys[0]].to(torch.floataa )
_A = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowerCAmelCase__ , lowerCAmelCase__ )
# update visited list
for item in pair_keys:
visited.append(lowerCAmelCase__ )
return pipeline
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
__A = parser.parse_args()
__A = args.base_model_path
__A = args.checkpoint_path
__A = args.dump_path
__A = args.lora_prefix_unet
__A = args.lora_prefix_text_encoder
__A = args.alpha
__A = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__A = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "roberta"
def __init__( self: Dict , __A: int=5_02_65 , __A: Union[str, Any]=7_68 , __A: Union[str, Any]=12 , __A: str=12 , __A: int=30_72 , __A: str="gelu" , __A: Union[str, Any]=0.1 , __A: int=0.1 , __A: Optional[int]=5_12 , __A: Union[str, Any]=2 , __A: str=0.02 , __A: str=1e-12 , __A: Any=1 , __A: str=0 , __A: Any=2 , __A: Optional[int]="absolute" , __A: Optional[Any]=True , __A: Union[str, Any]=None , **__A: List[str] , ) -> Dict:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
@property
def __A ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_A = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 62 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
A_ = BertTokenizer
A_ = BertTokenizerFast
A_ = True
A_ = True
A_ = filter_non_english
def __A ( self: int ) -> List[str]:
super().setUp()
_A = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self: Any , __A: List[str] ) -> List[str]:
_A = "UNwant\u00E9d,running"
_A = "unwanted, running"
return input_text, output_text
def __A ( self: List[str] ) -> Union[str, Any]:
_A = self.tokenizer_class(self.vocab_file )
_A = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(a_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [9, 6, 7, 12, 10, 11] )
def __A ( self: Any ) -> List[str]:
if not self.test_rust_tokenizer:
return
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = "UNwant\u00E9d,running"
_A = tokenizer.tokenize(a_ )
_A = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_A = tokenizer.encode(a_ , add_special_tokens=a_ )
_A = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_A = self.get_rust_tokenizer()
_A = tokenizer.encode(a_ )
_A = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
# With lower casing
_A = self.get_tokenizer(do_lower_case=a_ )
_A = self.get_rust_tokenizer(do_lower_case=a_ )
_A = "UNwant\u00E9d,running"
_A = tokenizer.tokenize(a_ )
_A = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
_A = tokenizer.encode(a_ , add_special_tokens=a_ )
_A = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
_A = self.get_rust_tokenizer()
_A = tokenizer.encode(a_ )
_A = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
def __A ( self: int ) -> int:
_A = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __A ( self: Dict ) -> List[str]:
_A = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __A ( self: Tuple ) -> Optional[Any]:
_A = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __A ( self: Tuple ) -> str:
_A = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __A ( self: Dict ) -> Optional[int]:
_A = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __A ( self: Optional[int] ) -> Any:
_A = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __A ( self: Optional[int] ) -> List[Any]:
_A = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __A ( self: Any ) -> Optional[int]:
_A = BasicTokenizer(do_lower_case=a_ , strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __A ( self: int ) -> Optional[Any]:
_A = BasicTokenizer(do_lower_case=a_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __A ( self: Dict ) -> Tuple:
_A = BasicTokenizer()
_A = "a\n'll !!to?'d of, can't."
_A = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(a_ ) , a_ )
def __A ( self: int ) -> List[Any]:
_A = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_A = {}
for i, token in enumerate(a_ ):
_A = i
_A = WordpieceTokenizer(vocab=a_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __A ( self: Optional[int] ) -> List[str]:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __A ( self: Dict ) -> str:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __A ( self: Union[str, Any] ) -> Dict:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __A ( self: str ) -> List[Any]:
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(a_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(a_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __A ( self: Tuple ) -> Optional[Any]:
_A = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
_A = tokenizer.encode('''sequence builders''' , add_special_tokens=a_ )
_A = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a_ )
_A = tokenizer.build_inputs_with_special_tokens(a_ )
_A = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def __A ( self: List[str] ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
_A = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_A = tokenizer_r.encode_plus(
a_ , return_attention_mask=a_ , return_token_type_ids=a_ , return_offsets_mapping=a_ , add_special_tokens=a_ , )
_A = tokenizer_r.do_lower_case if hasattr(a_ , '''do_lower_case''' ) else False
_A = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __A ( self: List[str] ) -> Union[str, Any]:
_A = ["的", "人", "有"]
_A = "".join(a_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A = True
_A = self.tokenizer_class.from_pretrained(a_ , **a_ )
_A = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
_A = tokenizer_p.encode(a_ , add_special_tokens=a_ )
_A = tokenizer_r.encode(a_ , add_special_tokens=a_ )
_A = tokenizer_r.convert_ids_to_tokens(a_ )
_A = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
_A = False
_A = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
_A = self.tokenizer_class.from_pretrained(a_ , **a_ )
_A = tokenizer_r.encode(a_ , add_special_tokens=a_ )
_A = tokenizer_p.encode(a_ , add_special_tokens=a_ )
_A = tokenizer_r.convert_ids_to_tokens(a_ )
_A = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that only the first Chinese character is not preceded by "##".
_A = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(a_ )
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
| 706 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__A = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __init__( self: int , *__A: str , __A: List[Any]=None , __A: Union[str, Any]=None , __A: List[Any]=None , **__A: int ) -> List[Any]:
super().__init__(*__A , **__A )
_A = eval_examples
_A = post_process_function
_A = quant_trainer_args
_A = 1_28 # default number of calibration samples
def __A ( self: Union[str, Any] , __A: List[Any]=None ) -> Optional[Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
_A = calib_dataset if calib_dataset is not None else self.calib_dataset
_A = self._remove_unused_columns(__A , description='''Calibration''' )
return DataLoader(
__A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , )
def __A ( self: List[Any] , __A: Any=None ) -> Optional[int]:
_A = self.train_dataset if calib_dataset is None else calib_dataset
_A = self.get_calib_dataloader(__A )
_A = self.model
quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A )
model.eval()
quant_trainer.enable_calibration(__A )
logger.info('''***** Running calibration *****''' )
logger.info(f""" Num examples = {self.calib_num}""" )
logger.info(f""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(__A ):
# Prediction step
_A ,_A ,_A = self.prediction_step(__A , __A , prediction_loss_only=__A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__A , self.quant_trainer_args )
_A = model
def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int:
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(__A )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_A = self.post_process_function(__A , __A , output.predictions )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
self.log(__A )
else:
_A = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A )
return metrics
def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]:
_A = self.get_test_dataloader(__A )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(__A , __A , output.predictions , '''predict''' )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A )
def __A ( self: Tuple , __A: Optional[Any]="./" ) -> List[str]:
_A = self.eval_dataset
_A = self.get_eval_dataloader(__A )
_A = next(iter(__A ) )
# saving device - to make it consistent
_A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
_A = tuple(v.to(__A ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
_A = True
_A = self.model.to(__A )
model.eval()
model.float()
_A = model.module if hasattr(__A , '''module''' ) else model
quant_trainer.configure_model(__A , self.quant_trainer_args )
_A = os.path.join(__A , '''model.onnx''' )
logger.info(f"""exporting model to {output_model_file}""" )
_A = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__A , )
logger.info('''onnx export finished''' )
| 62 | 0 |
import os
import sys
import unittest
__A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__A = os.path.join(git_repo_path, 'src', 'diffusers')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: List[Any] ) -> Union[str, Any]:
_A = find_backend(''' if not is_torch_available():''' )
self.assertEqual(__A , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_A = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(__A , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_A = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(__A , '''torch_and_transformers_and_onnx''' )
def __A ( self: Dict ) -> Optional[int]:
_A = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , __A )
self.assertIn('''torch_and_transformers''' , __A )
self.assertIn('''flax_and_transformers''' , __A )
self.assertIn('''torch_and_transformers_and_onnx''' , __A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __A ( self: int ) -> List[Any]:
_A = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(__A , '''\nCONSTANT = None\n''' )
_A = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
__A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
_A = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
_A = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(__A , __A )
def __A ( self: str ) -> Optional[int]:
_A = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
'''
_A = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , __A )
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
"""simple docstring"""
A_ = ["image_processor", "tokenizer"]
A_ = "AutoImageProcessor"
A_ = "AutoTokenizer"
def __init__( self: int , __A: str , __A: Dict ) -> int:
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_A = self.image_processor
def __call__( self: List[str] , __A: Tuple=None , __A: Optional[int]=None , __A: List[Any]=None , **__A: str ) -> Tuple:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_A = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if images is not None:
_A = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE )
def __A ( self: Optional[int] , *__A: int , **__A: Optional[Any] ) -> Optional[int]:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __A ( self: Union[str, Any] , *__A: Optional[Any] , **__A: Tuple ) -> Optional[int]:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def __A ( self: Optional[int] ) -> Union[str, Any]:
return ["input_ids", "attention_mask", "pixel_values"]
| 708 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq , size ):
    '''simple docstring'''
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input( dirty ):
    '''simple docstring'''
    dirty = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ''''''
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table( key ):
    '''simple docstring'''
    alphabet = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode( plaintext , key ):
    '''simple docstring'''
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ''''''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode( ciphertext , key ):
    '''simple docstring'''
    table = generate_table(key )
    plaintext = ''''''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
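# Editor's sketch: a round-trip check of the Playfair cipher above, using the
# reconstructed names (an editorial assumption). prepare_input pads repeated
# letters and odd lengths with X, so the decoded text is the padded plaintext.
def _playfair_demo() -> tuple:
    secret = encode('''Hello World''' , '''SECRET''' )
    return secret, decode(secret , '''SECRET''' )  # decoded text: 'HELXLOWORLDX'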
| 62 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class SCREAMING_SNAKE_CASE ( _A ):
"""simple docstring"""
def __init__( self: List[str] , *__A: List[str] , **__A: Optional[int] ) -> Any:
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
_A = {}
def __A ( self: Optional[Any] , __A: Optional[Any] , *__A: Union[str, Any] , **__A: Optional[int] ) -> int:
_A = super().add_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
''' `placeholder_token` that is not already in the tokenizer.''' )
def __A ( self: Any , __A: Union[str, Any] , *__A: Optional[Any] , __A: str=1 , **__A: Dict ) -> str:
_A = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
output.append(UpperCamelCase__ )
else:
_A = []
for i in range(UpperCamelCase__ ):
_A = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
output.append(UpperCamelCase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""" )
_A = output
def __A ( self: Any , __A: Union[str, Any] , __A: List[str]=False , __A: List[Any]=1.0 ) -> Optional[int]:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A = []
for i in range(len(UpperCamelCase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCamelCase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
_A = self.token_map[placeholder_token]
_A = tokens[: 1 + int(len(UpperCamelCase__ ) * prop_tokens_to_load )]
if vector_shuffle:
_A = copy.copy(UpperCamelCase__ )
random.shuffle(UpperCamelCase__ )
_A = text.replace(UpperCamelCase__ , ''' '''.join(UpperCamelCase__ ) )
return text
def __call__( self: Optional[int] , __A: List[str] , *__A: Dict , __A: List[Any]=False , __A: str=1.0 , **__A: str ) -> List[str]:
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCamelCase__ , vector_shuffle=UpperCamelCase__ , prop_tokens_to_load=UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ , )
def __A ( self: int , __A: List[str] , *__A: List[str] , __A: Optional[int]=False , __A: Union[str, Any]=1.0 , **__A: Optional[Any] ) -> Optional[Any]:
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCamelCase__ , vector_shuffle=UpperCamelCase__ , prop_tokens_to_load=UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ , )
| 709 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Tuple , __A: Any , __A: List[Any]=14 , __A: Dict=7 , __A: List[str]=True , __A: Tuple=True , __A: Union[str, Any]=True , __A: List[Any]=True , __A: Optional[int]=True , __A: Tuple=99 , __A: Optional[Any]=32 , __A: List[str]=5 , __A: Dict=4 , __A: str=37 , __A: Dict="gelu" , __A: List[str]=0.1 , __A: str=0.1 , __A: Any=5_12 , __A: Union[str, Any]=16 , __A: List[Any]=2 , __A: Tuple=0.02 , __A: Tuple=3 , __A: Union[str, Any]=4 , __A: Any=None , ) -> Optional[Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_token_type_ids
_A = use_input_mask
_A = use_labels
_A = use_mc_token_ids
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
_A = self.vocab_size - 1
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
if self.use_mc_token_ids:
_A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
_A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self: Optional[int] ) -> List[Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Dict , __A: Optional[int] , __A: List[str] , __A: List[str] , *__A: Optional[int] ) -> Optional[Any]:
_A = CTRLModel(config=__A )
model.to(__A )
model.eval()
model(__A , token_type_ids=__A , head_mask=__A )
model(__A , token_type_ids=__A )
_A = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __A ( self: Optional[Any] , __A: List[str] , __A: Dict , __A: List[Any] , __A: List[Any] , __A: Any , *__A: Any ) -> str:
_A = CTRLLMHeadModel(__A )
model.to(__A )
model.eval()
_A = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self: Optional[int] ) -> Dict:
_A = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def __A ( self: List[str] , __A: Dict , __A: Dict , __A: Tuple , __A: List[Any] , *__A: Optional[int] ) -> Any:
_A = self.num_labels
_A = CTRLForSequenceClassification(__A )
model.to(__A )
model.eval()
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
A_ = (CTRLLMHeadModel,) if is_torch_available() else ()
A_ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
def __A ( self: Any , __A: List[Any] , __A: int , __A: Optional[Any] , __A: Optional[int] , __A: List[Any] ) -> List[str]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __A ( self: Any ) -> Union[str, Any]:
_A = CTRLModelTester(self )
_A = ConfigTester(self , config_class=__A , n_embd=37 )
def __A ( self: Optional[int] ) -> List[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __A ( self: Dict ) -> Any:
self.config_tester.run_common_tests()
def __A ( self: str ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__A )
def __A ( self: List[str] ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: Optional[Any] ) -> int:
pass
@slow
def __A ( self: Tuple ) -> Dict:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = CTRLModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def __A ( self: Any ) -> Union[str, Any]:
pass
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: int ) -> Union[str, Any]:
super().tearDown()
# clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def __A ( self: Any ) -> Any:
_A = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(__A )
_A = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=__A ) # Legal the president is
_A = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
_A = model.generate(__A , do_sample=__A )
self.assertListEqual(output_ids[0].tolist() , __A )
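# A hedged sketch of the same greedy decode outside the test harness, using the
# public checkpoint the test loads; "Legal" is one of CTRL's control codes.
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained('ctrl')
model = CTRLLMHeadModel.from_pretrained('ctrl')
input_ids = tokenizer('Legal the president is', return_tensors='pt').input_ids
output_ids = model.generate(input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))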
| 62 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( _lowercase , _lowercase , _lowercase , _lowercase=5 ):
'''simple docstring'''
assert masked_input.count('''<mask>''' ) == 1
_A = torch.tensor(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) ).unsqueeze(0 ) # Batch size 1
_A = model(_lowercase )[0] # The last hidden-state is the first element of the output tuple
_A = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
_A = logits[0, masked_index, :]
_A = logits.softmax(dim=0 )
_A = prob.topk(k=_lowercase , dim=0 )
_A = ' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_lowercase ) )] )
_A = tokenizer.mask_token
_A = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
_A = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
if " {0}".format(_lowercase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(_lowercase ) , _lowercase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(_lowercase , _lowercase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
__A = CamembertTokenizer.from_pretrained('camembert-base')
__A = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
__A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
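# An equivalent, hedged one-liner using the transformers pipeline API instead
# of the manual top-k extraction above (same checkpoint, same prompt):
from transformers import pipeline

camembert_fill_mask = pipeline('fill-mask', model='camembert-base', tokenizer='camembert-base')
results = camembert_fill_mask('Le camembert est <mask> :)')
print([(r['token_str'], round(r['score'], 3)) for r in results])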
| 710 |
__A = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__A = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = True
_A = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_lowercase , _lowercase , _lowercase )
order.append(_lowercase )
return order
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = True
_A = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_lowercase , _lowercase , _lowercase )
return component
def __A ( _lowercase ):
'''simple docstring'''
_A = len(_lowercase ) * [False]
_A = {vert: [] for vert in range(len(_lowercase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_lowercase )
_A = []
for i, was_visited in enumerate(_lowercase ):
if not was_visited:
order += topology_sort(_lowercase , _lowercase , _lowercase )
_A = []
_A = len(_lowercase ) * [False]
for i in range(len(_lowercase ) ):
_A = order[len(_lowercase ) - i - 1]
if not visited[vert]:
_A = find_components(_lowercase , _lowercase , _lowercase )
components_list.append(_lowercase )
return components_list
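# A standalone sketch of the same two-pass (Kosaraju) idea, kept independent of
# the obfuscated bindings above; all names here are local to this sketch.
def kosaraju_scc(graph):
    visited, order = [False] * len(graph), []

    def dfs(vert):  # first pass: record DFS finishing order on the original graph
        visited[vert] = True
        for neighbour in graph[vert]:
            if not visited[neighbour]:
                dfs(neighbour)
        order.append(vert)

    for vert in graph:
        if not visited[vert]:
            dfs(vert)
    reversed_graph = {vert: [] for vert in graph}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    visited, components = [False] * len(graph), []

    def collect(vert, component):  # second pass: DFS on the reversed graph
        visited[vert] = True
        component.append(vert)
        for neighbour in reversed_graph[vert]:
            if not visited[neighbour]:
                collect(neighbour, component)

    for vert in reversed(order):  # process vertices in reverse finishing order
        if not visited[vert]:
            component = []
            collect(vert, component)
            components.append(component)
    return components

# for the first sample graph above: [[0, 1, 2], [3], [4]]
print(kosaraju_scc({0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}))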
| 62 | 0 |
import math
def __A ( _lowercase ):
'''simple docstring'''
assert isinstance(number , int ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
_A = range(3 , int(math.sqrt(__snake_case ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __A ( _lowercase , _lowercase=1 , **_lowercase ):
'''simple docstring'''
_A = factor * value
_A = value
while not is_prime(__snake_case ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **__snake_case )
return value
| 711 |
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
_A = mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase )
else:
_A = max(
mf_knapsack(i - 1 , _lowercase , _lowercase , _lowercase ) , mf_knapsack(i - 1 , _lowercase , _lowercase , j - wt[i - 1] ) + val[i - 1] , )
_A = val
return f[i][j]
def __A ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
_A = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
_A = dp[i - 1][w_]
return dp[n][w_], dp
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if not (isinstance(_lowercase , (list, tuple) ) and isinstance(_lowercase , (list, tuple) )):
raise ValueError(
'''Both the weights and values vectors must be either lists or tuples''' )
_A = len(_lowercase )
if num_items != len(_lowercase ):
_A = (
'''The number of weights must be the same as the number of values.\n'''
f"""But got {num_items} weights and {len(_lowercase )} values"""
)
raise ValueError(_lowercase )
for i in range(_lowercase ):
if not isinstance(wt[i] , _lowercase ):
_A = (
'''All weights must be integers but got weight of '''
f"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(_lowercase )
_A ,_A = knapsack(_lowercase , _lowercase , _lowercase , _lowercase )
_A = set()
_construct_solution(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
return optimal_val, example_optional_set
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(_lowercase , _lowercase , i - 1 , _lowercase , _lowercase )
else:
optimal_set.add(_lowercase )
_construct_solution(_lowercase , _lowercase , i - 1 , j - wt[i - 1] , _lowercase )
if __name__ == "__main__":
__A = [3, 2, 4, 4]
__A = [4, 3, 2, 3]
__A = 4
__A = 6
__A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
__A , __A = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
__A , __A = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
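# A short, self-contained illustration of the same 0/1 knapsack recurrence
# dp[i][w_] = max(dp[i-1][w_], dp[i-1][w_ - wt[i-1]] + val[i-1]), here in its
# one-dimensional rolling form; names are local to this sketch.
def knapsack_max_value(capacity, weights, values):
    dp = [0] * (capacity + 1)
    for weight, value in zip(weights, values):
        for c in range(capacity, weight - 1, -1):  # backwards, so each item is used at most once
            dp[c] = max(dp[c], dp[c - weight] + value)
    return dp[capacity]

# same instance as above (val=[3, 2, 4, 4], wt=[4, 3, 2, 3], w=6): items 3 and 4 give 8
assert knapsack_max_value(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8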
| 62 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
__A = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
__A = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n'
__A = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def __A ( self: Dict ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def __A ( self: int , __A: int ) -> Union[str, Any]:
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
_A = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
_A = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
_A = self.config_name.upper()
else:
raise KeyError(
f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
_A = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
_A = score.BleurtScorer(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
def __A ( self: Union[str, Any] , __A: List[str] , __A: Optional[int] ) -> Optional[int]:
_A = self.scorer.score(references=lowerCamelCase_ , candidates=lowerCamelCase_ )
return {"scores": scores}
| 712 |
def __A ( _lowercase = 1_00_00_00 ):
'''simple docstring'''
_A = 1
_A = 1
_A = {1: 1}
for inputa in range(2 , _lowercase ):
_A = 0
_A = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_A = (3 * number) + 1
counter += 1
if inputa not in counters:
_A = counter
if counter > pre_counter:
_A = inputa
_A = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
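# A tiny hedged cross-check of the same memoised Collatz chain-length idea; the
# helper below is local to this sketch (note the mutable-default cache idiom).
def collatz_length(n, cache={1: 1}):
    if n not in cache:
        cache[n] = 1 + collatz_length(n // 2 if n % 2 == 0 else 3 * n + 1)
    return cache[n]

# 9 has the longest chain below 10: 9 -> 28 -> 14 -> ... -> 1 is 20 terms
assert collatz_length(9) == 20
assert max(range(1, 10), key=collatz_length) == 9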
| 62 | 0 |
import operator as op
__A = 'scaler.pt'
__A = 'pytorch_model'
__A = 'random_states'
__A = 'optimizer'
__A = 'scheduler'
__A = 'pytorch_model.bin'
__A = 'pytorch_model.bin.index.json'
__A = 'model.safetensors'
__A = 'model.safetensors.index.json'
__A = '1.10.2'
__A = 'py38'
__A = '4.17.0'
__A = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
__A = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
__A = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
__A = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
__A = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
__A = '2.0.1'
__A = ['pdsh', 'standard', 'openmpi', 'mvapich']
__A = ['default', 'reduce-overhead', 'max-autotune']
__A = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__A = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
__A = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
__A = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
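# A minimal sketch of what the operator table above is typically used for:
# dictionary dispatch when comparing version strings. Names below are local to
# this sketch, not the real accelerate helpers.
import operator as op
from packaging import version

STR_TO_OP = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}

def compare_versions(current, operation, required):
    # look the comparison up instead of branching on the operator string
    return STR_TO_OP[operation](version.parse(current), version.parse(required))

assert compare_versions('2.0.1', '>=', '1.10.2')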
| 713 |
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = word.split()
def justify(_lowercase , _lowercase , _lowercase ) -> str:
_A = max_width - width
_A = len(_lowercase )
if len(_lowercase ) == 1:
# if there is only one word in the line,
# just pad the remainder of the line with overall_spaces_count spaces
return line[0] + " " * overall_spaces_count
else:
_A = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_A = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_A = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_lowercase ):
num_spaces_between_words_list[i] += 1
_A = []
for i in range(_lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowercase )
_A = []
_A = []
_A = 0
for word in words:
if width + len(_lowercase ) + len(_lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowercase )
width += len(_lowercase )
else:
# justify the line and add it to result
answer.append(justify(_lowercase , _lowercase , _lowercase ) )
# reset new line and new width
_A ,_A = [word], len(_lowercase )
_A = max_width - width - len(_lowercase )
answer.append(''' '''.join(_lowercase ) + (remaining_spaces + 1) * ''' ''' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
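# A standalone illustration of the round-robin rule used in justify() above:
# leftover spaces go one at a time to the leftmost gaps first.
def distribute_spaces(extra, gaps):
    base, remainder = divmod(extra, gaps)
    return [base + (1 if i < remainder else 0) for i in range(gaps)]

# 5 extra spaces over 3 gaps: the two leftmost gaps absorb the remainder
assert distribute_spaces(5, 3) == [2, 2, 1]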
| 62 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
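# The same optional-dependency guard in miniature (names are illustrative, not
# the real diffusers helpers): probe the import once, then gate re-exports.
try:
    import scipy  # noqa: F401
    _scipy_available = True
except ImportError:
    _scipy_available = False

if not _scipy_available:
    print('scipy-backed schedulers would be replaced by dummy placeholders here')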
| 714 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__A = '\\n Text data.\n Second line of data.'
__A = 'file'
@pytest.fixture(scope='''session''' )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_A = bytes(_lowercase , '''utf-8''' )
with zstd.open(_lowercase , '''wb''' ) as f:
f.write(_lowercase )
return path
@pytest.fixture
def __A ( _lowercase ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f:
f.write(_lowercase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_A = input_paths[compression_format]
_A = tmp_path / '''cache'''
_A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase )
_A = cached_path(_lowercase , download_config=_lowercase )
with open(_lowercase ) as f:
_A = f.read()
with open(_lowercase ) as f:
_A = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = '''custom_cache'''
_A = '''custom_extracted_dir'''
_A = tmp_path / '''custom_extracted_path'''
if default_extracted:
_A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) )
_A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_A = xz_file
_A = (
DownloadConfig(extract_compressed_file=_lowercase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase )
)
_A = cached_path(_lowercase , download_config=_lowercase )
assert Path(_lowercase ).parent.parts[-2:] == expected
def __A ( _lowercase ):
'''simple docstring'''
_A = str(Path(_lowercase ).resolve() )
assert cached_path(_lowercase ) == text_file
# relative path
_A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowercase ) == text_file
def __A ( _lowercase ):
'''simple docstring'''
_A = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_lowercase ):
cached_path(_lowercase )
# relative path
_A = '''./__missing_file__.txt'''
with pytest.raises(_lowercase ):
cached_path(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(_lowercase ) as f:
_A = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( ):
'''simple docstring'''
with pytest.raises(_lowercase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
http_get('''https://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
fsspec_head('''s3://huggingface.co''' )
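# A hedged sketch of the behaviour the local-path tests above exercise:
# cached_path resolves an existing local file to itself, with no download.
import tempfile
from datasets.utils.file_utils import cached_path

with tempfile.TemporaryDirectory() as tmp_dir:
    local_file = os.path.join(tmp_dir, 'data.txt')
    with open(local_file, 'w') as f:
        f.write('Text data.')
    assert cached_path(local_file) == local_file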
| 62 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__A = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__A = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
__A = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def __A ( self: str ) -> str:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install \"sacrebleu>=1.4.12\"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __A ( self: Any , __A: List[str] , __A: int , __A: int = CHRF.CHAR_ORDER , __A: int = CHRF.WORD_ORDER , __A: int = CHRF.BETA , __A: bool = False , __A: bool = False , __A: bool = False , ) -> int:
_A = len(references[0] )
if any(len(lowercase_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_A = [[refs[i] for refs in references] for i in range(lowercase_ )]
_A = CHRF(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
_A = sb_chrf.corpus_score(lowercase_ , lowercase_ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 715 |
import math
def __A ( _lowercase ):
'''simple docstring'''
_A = []
_A = 2
_A = int(math.sqrt(_lowercase ) ) # Size of every segment
_A = [True] * (end + 1)
_A = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowercase )
for i in range(start * start , end + 1 , _lowercase ):
_A = False
start += 1
prime += in_prime
_A = end + 1
_A = min(2 * end , _lowercase )
while low <= n:
_A = [True] * (high - low + 1)
for each in in_prime:
_A = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowercase , high + 1 , _lowercase ):
_A = False
for j in range(len(_lowercase ) ):
if temp[j] is True:
prime.append(j + low )
_A = high + 1
_A = min(high + end , _lowercase )
return prime
print(sieve(10**6))
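# A quick standalone sanity check: a segmented sieve must agree with the plain
# Sieve of Eratosthenes on small bounds (simple_sieve is local to this sketch).
def simple_sieve(n):
    is_prime = [False, False] + [True] * (n - 1)
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]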
| 62 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class SCREAMING_SNAKE_CASE ( __lowerCAmelCase ):
"""simple docstring"""
A_ = '''speech_to_text'''
A_ = ['''past_key_values''']
A_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self: int , __A: Optional[Any]=1_00_00 , __A: Dict=12 , __A: Any=20_48 , __A: Tuple=4 , __A: List[str]=6 , __A: str=20_48 , __A: List[Any]=4 , __A: Optional[int]=0.0 , __A: List[Any]=0.0 , __A: Union[str, Any]=True , __A: Tuple=True , __A: int="relu" , __A: Optional[Any]=2_56 , __A: Union[str, Any]=0.1 , __A: int=0.0 , __A: Union[str, Any]=0.0 , __A: Dict=0.02 , __A: Dict=2 , __A: int=True , __A: List[Any]=1 , __A: List[Any]=0 , __A: Union[str, Any]=2 , __A: Optional[Any]=60_00 , __A: List[Any]=10_24 , __A: Tuple=2 , __A: List[str]=(5, 5) , __A: int=10_24 , __A: Optional[int]=80 , __A: Dict=1 , **__A: Union[str, Any] , ) -> Tuple:
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(lowerCAmelCase_ )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
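# A hedged instantiation sketch, assuming the class above mirrors transformers'
# Speech2TextConfig (the kwargs below are illustrative, not a tuned setup):
from transformers import Speech2TextConfig

config = Speech2TextConfig(
    vocab_size=10000,
    encoder_layers=6,
    decoder_layers=6,
    num_conv_layers=2,
    conv_kernel_sizes=(5, 5),  # must have num_conv_layers entries, per the check above
)
print(config.d_model, config.max_source_positions)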
| 716 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: Tuple ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __A: Dict ) -> Tuple:
_A ,_A ,_A ,_A = hidden_states.shape
_A = jax.image.resize(
__A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: List[str] ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = None
A_ = 0.0
A_ = None
A_ = jnp.floataa
def __A ( self: Dict ) -> Dict:
_A = self.in_channels if self.out_channels is None else self.out_channels
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = nn.Dense(__A , dtype=self.dtype )
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Dropout(self.dropout_prob )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_A = None
if use_nin_shortcut:
_A = nn.Conv(
__A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]:
_A = hidden_states
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.conva(__A )
_A = self.time_emb_proj(nn.swish(__A ) )
_A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 )
_A = hidden_states + temb
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.dropout(__A , __A )
_A = self.conva(__A )
if self.conv_shortcut is not None:
_A = self.conv_shortcut(__A )
return hidden_states + residual
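# A hedged usage sketch with Flax's functional init/apply, assuming the first
# module above corresponds to diffusers' FlaxUpsample2D (the import path and
# class name are assumptions about the matching diffusers version):
import jax
from diffusers.models.resnet_flax import FlaxUpsample2D

upsample = FlaxUpsample2D(out_channels=8)
x = jnp.zeros((1, 16, 16, 8))  # NHWC layout, as the resize call above expects
params = upsample.init(jax.random.PRNGKey(0), x)
y = upsample.apply(params, x)  # spatial dims double: (1, 32, 32, 8)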
| 62 | 0 |
def __A ( _lowercase ):
'''simple docstring'''
_A = [[0 for _ in range(_lowercase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_A = 1
for n in range(m + 1 ):
for k in range(1 , _lowercase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__A = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__A = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
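# A compact standalone cross-check using the one-dimensional recurrence
# ways[total] += ways[total - part]; names are local to this sketch.
def count_partitions(m):
    ways = [1] + [0] * m  # ways[0] = 1: the empty partition
    for part in range(1, m + 1):  # the outer loop over part sizes makes the count order-insensitive
        for total in range(part, m + 1):
            ways[total] += ways[total - part]
    return ways[m]

# the partition numbers p(1..7) = 1, 2, 3, 5, 7, 11, 15 (OEIS A000041)
assert [count_partitions(m) for m in range(1, 8)] == [1, 2, 3, 5, 7, 11, 15]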
| 717 |
def __A ( _lowercase ):
'''simple docstring'''
_A = [0] * len(_lowercase )
_A = []
_A = []
_A = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_lowercase ) ):
if indegree[i] == 0:
queue.append(_lowercase )
while queue:
_A = queue.pop(0 )
cnt += 1
topo.append(_lowercase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_lowercase )
if cnt != len(_lowercase ):
print('''Cycle exists''' )
else:
print(_lowercase )
# Adjacency List of Graph
__A = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
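# A standalone sketch of the same in-degree (Kahn) idea with a deque instead of
# list.pop(0); names are local to this sketch.
from collections import deque

def kahn_order(graph):
    indegree = {vert: 0 for vert in graph}
    for neighbours in graph.values():
        for neighbour in neighbours:
            indegree[neighbour] += 1
    queue = deque(vert for vert, degree in indegree.items() if degree == 0)
    order = []
    while queue:
        vert = queue.popleft()
        order.append(vert)
        for neighbour in graph[vert]:
            indegree[neighbour] -= 1
            if indegree[neighbour] == 0:
                queue.append(neighbour)
    return order if len(order) == len(graph) else None  # None signals a cycle

assert kahn_order({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) == [0, 1, 2, 3, 4, 5]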
| 62 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
A_ = 42
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: Dict , __A: int=3 , __A: Any=3 , __A: Union[str, Any]=("DownEncoderBlock2D",) , __A: Any=(64,) , __A: Optional[Any]=2 , __A: Tuple=32 , __A: Any="silu" , __A: Dict=True , ) -> Union[str, Any]:
super().__init__()
_A = layers_per_block
_A = torch.nn.Convad(
snake_case__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_A = None
_A = nn.ModuleList([] )
# down
_A = block_out_channels[0]
for i, down_block_type in enumerate(snake_case__ ):
_A = output_channel
_A = block_out_channels[i]
_A = i == len(snake_case__ ) - 1
_A = get_down_block(
snake_case__ , num_layers=self.layers_per_block , in_channels=snake_case__ , out_channels=snake_case__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , )
self.down_blocks.append(snake_case__ )
# mid
_A = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , )
# out
_A = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=snake_case__ , eps=1e-6 )
_A = nn.SiLU()
_A = 2 * out_channels if double_z else out_channels
_A = nn.Convad(block_out_channels[-1] , snake_case__ , 3 , padding=1 )
_A = False
def __A ( self: Optional[int] , __A: int ) -> Optional[int]:
_A = x
_A = self.conv_in(snake_case__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__A: Dict ):
def custom_forward(*__A: Any ):
return module(*snake_case__ )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
_A = torch.utils.checkpoint.checkpoint(
create_custom_forward(snake_case__ ) , snake_case__ , use_reentrant=snake_case__ )
# middle
_A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case__ , use_reentrant=snake_case__ )
else:
for down_block in self.down_blocks:
_A = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ )
# middle
_A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , snake_case__ )
else:
# down
for down_block in self.down_blocks:
_A = down_block(snake_case__ )
# middle
_A = self.mid_block(snake_case__ )
# post-process
_A = self.conv_norm_out(snake_case__ )
_A = self.conv_act(snake_case__ )
_A = self.conv_out(snake_case__ )
return sample
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: Optional[Any] , __A: Dict=3 , __A: Optional[Any]=3 , __A: Tuple=("UpDecoderBlock2D",) , __A: Tuple=(64,) , __A: Union[str, Any]=2 , __A: List[str]=32 , __A: str="silu" , __A: Dict="group" , ) -> Tuple:
super().__init__()
_A = layers_per_block
_A = nn.Convad(
snake_case__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_A = None
_A = nn.ModuleList([] )
_A = in_channels if norm_type == "spatial" else None
# mid
_A = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , )
# up
_A = list(reversed(snake_case__ ) )
_A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(snake_case__ ):
_A = output_channel
_A = reversed_block_out_channels[i]
_A = i == len(snake_case__ ) - 1
_A = get_up_block(
snake_case__ , num_layers=self.layers_per_block + 1 , in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , resnet_time_scale_shift=snake_case__ , )
self.up_blocks.append(snake_case__ )
_A = output_channel
# out
if norm_type == "spatial":
_A = SpatialNorm(block_out_channels[0] , snake_case__ )
else:
_A = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case__ , eps=1e-6 )
_A = nn.SiLU()
_A = nn.Convad(block_out_channels[0] , snake_case__ , 3 , padding=1 )
_A = False
def __A ( self: Optional[Any] , __A: Any , __A: Any=None ) -> Union[str, Any]:
_A = z
_A = self.conv_in(snake_case__ )
_A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__A: Dict ):
def custom_forward(*__A: Any ):
return module(*snake_case__ )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
_A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ )
_A = sample.to(snake_case__ )
# up
for up_block in self.up_blocks:
_A = torch.utils.checkpoint.checkpoint(
create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ )
else:
# middle
_A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ )
_A = sample.to(snake_case__ )
# up
for up_block in self.up_blocks:
_A = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ )
else:
# middle
_A = self.mid_block(snake_case__ , snake_case__ )
_A = sample.to(snake_case__ )
# up
for up_block in self.up_blocks:
_A = up_block(snake_case__ , snake_case__ )
# post-process
if latent_embeds is None:
_A = self.conv_norm_out(snake_case__ )
else:
_A = self.conv_norm_out(snake_case__ , snake_case__ )
_A = self.conv_act(snake_case__ )
_A = self.conv_out(snake_case__ )
return sample
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: Optional[Any] , __A: int , __A: str , __A: int , __A: Tuple=None , __A: Optional[int]="random" , __A: int=False , __A: Dict=True ) -> Any:
super().__init__()
_A = n_e
_A = vq_embed_dim
_A = beta
_A = legacy
_A = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_A = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
_A = self.used.shape[0]
_A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_A = self.re_embed
_A = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
_A = n_e
_A = sane_index_shape
def __A ( self: List[Any] , __A: Any ) -> int:
_A = inds.shape
assert len(snake_case__ ) > 1
_A = inds.reshape(ishape[0] , -1 )
_A = self.used.to(snake_case__ )
_A = (inds[:, :, None] == used[None, None, ...]).long()
_A = match.argmax(-1 )
_A = match.sum(2 ) < 1
if self.unknown_index == "random":
_A = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_A = self.unknown_index
return new.reshape(snake_case__ )
def __A ( self: List[str] , __A: List[str] ) -> Optional[int]:
_A = inds.shape
assert len(snake_case__ ) > 1
_A = inds.reshape(ishape[0] , -1 )
_A = self.used.to(snake_case__ )
if self.re_embed > self.used.shape[0]: # extra token
_A = 0 # simply set to zero
_A = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , snake_case__ )
return back.reshape(snake_case__ )
def __A ( self: List[str] , __A: int ) -> Any:
_A = z.permute(0 , 2 , 3 , 1 ).contiguous()
_A = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_A = torch.argmin(torch.cdist(snake_case__ , self.embedding.weight ) , dim=1 )
_A = self.embedding(snake_case__ ).view(z.shape )
_A = None
_A = None
# compute loss for embedding
if not self.legacy:
_A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_A = z + (z_q - z).detach()
# reshape back to match original input shape
_A = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_A = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_A = self.remap_to_used(snake_case__ )
_A = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_A = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __A ( self: Union[str, Any] , __A: Optional[Any] , __A: List[Any] ) -> Optional[Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_A = indices.reshape(shape[0] , -1 ) # add batch axis
_A = self.unmap_to_all(snake_case__ )
_A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_A = self.embedding(snake_case__ )
if shape is not None:
_A = z_q.view(snake_case__ )
# reshape back to match original input shape
_A = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self: Optional[Any] , __A: Optional[Any] , __A: Tuple=False ) -> Any:
_A = parameters
_A = torch.chunk(snake_case__ , 2 , dim=1 )
_A = torch.clamp(self.logvar , -30.0 , 20.0 )
_A = deterministic
_A = torch.exp(0.5 * self.logvar )
_A = torch.exp(self.logvar )
if self.deterministic:
_A = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __A ( self: Union[str, Any] , __A: Optional[Any] = None ) -> Optional[Any]:
_A = randn_tensor(
self.mean.shape , generator=snake_case__ , device=self.parameters.device , dtype=self.parameters.dtype )
_A = self.mean + self.std * sample
return x
def __A ( self: Optional[Any] , __A: Dict=None ) -> Union[str, Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __A ( self: Union[str, Any] , __A: Dict , __A: Optional[Any]=[1, 2, 3] ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
_A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=snake_case__ )
def __A ( self: List[Any] ) -> str:
return self.mean
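# A hedged usage sketch of the posterior class above, assuming it matches
# diffusers' DiagonalGaussianDistribution (the import path may differ by version):
from diffusers.models.vae import DiagonalGaussianDistribution

params = torch.randn(1, 8, 4, 4)  # encoder output, chunked into mean and logvar along dim 1
posterior = DiagonalGaussianDistribution(params)
z = posterior.sample(generator=torch.manual_seed(0))  # reparameterised draw, shape (1, 4, 4, 4)
kl = posterior.kl()  # KL to the standard normal, summed over dims [1, 2, 3]
print(z.shape, kl.shape)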
| 718 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class SCREAMING_SNAKE_CASE ( snake_case , snake_case ):
"""simple docstring"""
A_ = 1
@register_to_config
def __init__( self: Any , __A: int = 10_00 , __A: Optional[Union[np.ndarray, List[float]]] = None ) -> List[str]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__A )
# standard deviation of the initial noise distribution
_A = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly formulas (9), (12), (13) and Algorithm 2.
_A = 4
# running values
_A = []
def __A ( self: str , __A: int , __A: Union[str, torch.device] = None ) -> int:
_A = num_inference_steps
_A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_A = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_A = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
_A = torch.sin(steps * math.pi / 2 ) ** 2
_A = (1.0 - self.betas**2) ** 0.5
_A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
_A = timesteps.to(__A )
_A = []
def __A ( self: Tuple , __A: torch.FloatTensor , __A: int , __A: torch.FloatTensor , __A: bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
_A = (self.timesteps == timestep).nonzero().item()
_A = timestep_index + 1
_A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__A )
if len(self.ets ) == 1:
_A = self.ets[-1]
elif len(self.ets ) == 2:
_A = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_A = self._get_prev_sample(__A , __A , __A , __A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__A )
def __A ( self: Optional[int] , __A: torch.FloatTensor , *__A: Tuple , **__A: List[Any] ) -> torch.FloatTensor:
return sample
def __A ( self: List[str] , __A: Optional[Any] , __A: Optional[Any] , __A: Any , __A: List[Any] ) -> List[Any]:
_A = self.alphas[timestep_index]
_A = self.betas[timestep_index]
_A = self.alphas[prev_timestep_index]
_A = self.betas[prev_timestep_index]
_A = (sample - sigma * ets) / max(__A , 1e-8 )
_A = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[str] ) -> Dict:
return self.config.num_train_timesteps
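# A hedged sketch of the sampling loop this scheduler is built for, assuming
# the class above is diffusers' IPNDMScheduler:
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for a denoiser forward pass
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)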
| 62 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
A_ = CycleDiffusionPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
A_ = PipelineTesterMixin.required_optional_params - {"latents"}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self: Optional[Any] ) -> Tuple:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_A = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
_A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_A = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __A ( self: str , __A: Any , __A: int=0 ) -> List[Any]:
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
_A = image / 2 + 0.5
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
_A = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
_A = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
_A = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self: Optional[int] ) -> Optional[int]:
_A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
_A = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
_A = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
_A = pipe(**SCREAMING_SNAKE_CASE_ )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self: Tuple ) -> List[str]:
_A = self.get_dummy_components()
for name, module in components.items():
if hasattr(SCREAMING_SNAKE_CASE_ , '''half''' ):
_A = module.half()
_A = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
_A = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
_A = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
_A = pipe(**SCREAMING_SNAKE_CASE_ )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __A ( self: Optional[int] ) -> str:
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def __A ( self: Optional[Any] ) -> Optional[int]:
return super().test_inference_batch_single_identical()
@skip_mps
def __A ( self: Optional[Any] ) -> List[str]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __A ( self: Union[str, Any] ) -> List[Any]:
return super().test_save_load_optional_components()
@skip_mps
def __A ( self: Union[str, Any] ) -> Optional[int]:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Tuple ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self: Optional[int] ) -> str:
_A = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
_A = init_image.resize((5_12, 5_12) )
_A = '''CompVis/stable-diffusion-v1-4'''
_A = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder='''scheduler''' )
_A = CycleDiffusionPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
_A = '''A black colored car'''
_A = '''A blue colored car'''
_A = torch.manual_seed(0 )
_A = pipe(
prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
_A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def __A ( self: Tuple ) -> Tuple:
_A = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
_A = init_image.resize((5_12, 5_12) )
_A = '''CompVis/stable-diffusion-v1-4'''
_A = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder='''scheduler''' )
_A = CycleDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
_A = '''A black colored car'''
_A = '''A blue colored car'''
_A = torch.manual_seed(0 )
_A = pipe(
prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
_A = output.images
assert np.abs(image - expected_image ).max() < 2e-2
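# A minimal end-to-end sketch of the pipeline exercised by the tests above
# (prompts and checkpoint follow the test; the image path is illustrative, and
# running this requires a GPU plus network access):
#
#   import torch
#   from diffusers import CycleDiffusionPipeline, DDIMScheduler
#   from diffusers.utils import load_image
#
#   scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#   pipe = CycleDiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
#   ).to("cuda")
#   init_image = load_image("black_colored_car.png").resize((512, 512))
#   result = pipe(
#       prompt="A blue colored car",
#       source_prompt="A black colored car",
#       image=init_image,
#       num_inference_steps=100,
#       eta=0.1,
#       strength=0.85,
#       guidance_scale=3,
#       source_guidance_scale=1,
#       generator=torch.manual_seed(0),
#       output_type="np",
#   ).images[0]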
| 719 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the unique paths from (row, col) to the bottom-right cell of grid.

    Cells containing 1 are blocked; a path may step up, down, left or right
    and may not revisit a cell (the visit set is used for backtracking).
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
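# Usage sketch: count the simple paths through a 3x3 grid whose centre cell is
# blocked; only the top/right and left/bottom routes remain, so the answer is 2.
#
#   grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#   depth_first_search(grid, 0, 0, set())  # -> 2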
| 62 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
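# Usage sketch: with the lazy module installed in sys.modules, attribute access
# triggers the real import on first use (assuming torch is available), e.g.:
#
#   from transformers.models.timesformer import TimesformerForVideoClassification
#   model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")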
| 720 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__A = NewType('DataClass', Any)
__A = NewType('DataClassType', Any)
def __A ( _lowercase ):
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __A ( _lowercase ):
'''simple docstring'''
_A = {str(_lowercase ): choice for choice in choices}
return lambda _lowercase : str_to_choice.get(_lowercase , _lowercase )
def __A ( *,
_lowercase = None , _lowercase = None , _lowercase = dataclasses.MISSING , _lowercase = dataclasses.MISSING , _lowercase = None , **_lowercase , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_A = {}
if aliases is not None:
_A = aliases
if help is not None:
_A = help
return dataclasses.field(metadata=_lowercase , default=_lowercase , default_factory=_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = 42
def __init__( self: Optional[Any] , __A: Union[DataClassType, Iterable[DataClassType]] , **__A: List[Any] ) -> str:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_A = ArgumentDefaultsHelpFormatter
super().__init__(**__A )
if dataclasses.is_dataclass(__A ):
_A = [dataclass_types]
_A = list(__A )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__A )
@staticmethod
def __A ( __A: ArgumentParser , __A: dataclasses.Field ) -> str:
_A = f"""--{field.name}"""
_A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __A ):
raise RuntimeError(
'''Unresolved type detected, which should have been resolved with the help of '''
'''the `typing.get_type_hints` method by default''' )
_A = kwargs.pop('''aliases''' , [] )
if isinstance(__A , __A ):
_A = [aliases]
_A = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__A , '''UnionType''' ) and isinstance(__A , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__A ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__A ) not in field.type.__args__:
# filter `str` in Union
_A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_A = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_A = (
field.type.__args__[0] if isinstance(__A , field.type.__args__[1] ) else field.type.__args__[1]
)
_A = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_A = {}
if origin_type is Literal or (isinstance(field.type , __A ) and issubclass(field.type , __A )):
if origin_type is Literal:
_A = field.type.__args__
else:
_A = [x.value for x in field.type]
_A = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
_A = field.default
else:
_A = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_A = copy(__A )
# Hack because type=bool in argparse does not behave as we want.
_A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_A = default
# This tells argparse we accept 0 or 1 value after --field_name
_A = '''?'''
# This is the value that will get picked if we do --field_name (without value)
_A = True
elif isclass(__A ) and issubclass(__A , __A ):
_A = field.type.__args__[0]
_A = '''+'''
if field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
elif field.default is dataclasses.MISSING:
_A = True
else:
_A = field.type
if field.default is not dataclasses.MISSING:
_A = field.default
elif field.default_factory is not dataclasses.MISSING:
_A = field.default_factory()
else:
_A = True
parser.add_argument(__A , *__A , **__A )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_A = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__A )
def __A ( self: Dict , __A: DataClassType ) -> List[Any]:
if hasattr(__A , '''_argument_group_name''' ):
_A = self.add_argument_group(dtype._argument_group_name )
else:
_A = self
try:
_A = get_type_hints(__A )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing the line `from __future__ import annotations`, which opts into Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__A ):
_A = '''.'''.join(map(__A , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''the line `from __future__ import annotations`, which opts into union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__A ):
if not field.init:
continue
_A = type_hints[field.name]
self._parse_dataclass_field(__A , __A )
def __A ( self: int , __A: Any=None , __A: int=False , __A: Any=True , __A: Optional[Any]=None , __A: Any=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_A = []
if args_filename:
args_files.append(Path(__A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_A = ArgumentParser()
args_file_parser.add_argument(__A , type=__A , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
_A ,_A = args_file_parser.parse_known_args(args=__A )
_A = vars(__A ).get(args_file_flag.lstrip('''-''' ) , __A )
if cmd_args_file_paths:
args_files.extend([Path(__A ) for p in cmd_args_file_paths] )
_A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_A = file_args + args if args is not None else file_args + sys.argv[1:]
_A ,_A = self.parse_known_args(args=__A )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in vars(__A ).items() if k in keys}
for k in keys:
delattr(__A , __A )
_A = dtype(**__A )
outputs.append(__A )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def __A ( self: Tuple , __A: Dict[str, Any] , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = set(args.keys() )
_A = []
for dtype in self.dataclass_types:
_A = {f.name for f in dataclasses.fields(__A ) if f.init}
_A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_A = dtype(**__A )
outputs.append(__A )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__A )}""" )
return tuple(__A )
def __A ( self: Tuple , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
with open(Path(__A ) , encoding='''utf-8''' ) as open_json_file:
_A = json.loads(open_json_file.read() )
_A = self.parse_dict(__A , allow_extra_keys=__A )
return tuple(__A )
def __A ( self: List[Any] , __A: str , __A: bool = False ) -> Tuple[DataClass, ...]:
_A = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A )
return tuple(__A )
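# Usage sketch for the parser above (it mirrors transformers.HfArgumentParser,
# whose public method is parse_args_into_dataclasses; the dataclass is illustrative):
#
#   import dataclasses
#
#   @dataclasses.dataclass
#   class TrainArgs:
#       learning_rate: float = 3e-4
#       do_eval: bool = False
#
#   parser = HfArgumentParser(TrainArgs)
#   (train_args,) = parser.parse_args_into_dataclasses(
#       args=["--learning_rate", "1e-4", "--do_eval"]
#   )
#   assert train_args.learning_rate == 1e-4 and train_args.do_eval is True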
| 62 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A_ = ["image_processor", "tokenizer"]
A_ = "BlipImageProcessor"
A_ = "AutoTokenizer"
def __init__( self: Dict , __A: List[str] , __A: Optional[int] , __A: List[Any] ) -> Any:
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
# add QFormer tokenizer
_A = qformer_tokenizer
def __call__( self: Union[str, Any] , __A: ImageInput = None , __A: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A: bool = True , __A: Union[bool, str, PaddingStrategy] = False , __A: Union[bool, str, TruncationStrategy] = None , __A: Optional[int] = None , __A: int = 0 , __A: Optional[int] = None , __A: Optional[bool] = None , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = False , __A: bool = True , __A: Optional[Union[str, TensorType]] = None , **__A: Union[str, Any] , ) -> Optional[Any]:
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
_A = BatchFeature()
if text is not None:
_A = self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
encoding.update(UpperCAmelCase__ )
_A = self.qformer_tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
_A = qformer_text_encoding.pop('''input_ids''' )
_A = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
_A = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ )
encoding.update(UpperCAmelCase__ )
return encoding
def __A ( self: Dict , *__A: Optional[int] , **__A: Optional[Any] ) -> Dict:
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self: str , *__A: Dict , **__A: Optional[Any] ) -> Dict:
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __A ( self: List[str] ) -> int:
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __A ( self: Dict , __A: int , **__A: str ) -> Optional[int]:
if os.path.isfile(UpperCAmelCase__ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
_A = os.path.join(UpperCAmelCase__ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase__ )
return super().save_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def __A ( cls: Optional[int] , __A: Any , **__A: List[str] ) -> Union[str, Any]:
_A = AutoTokenizer.from_pretrained(UpperCAmelCase__ , subfolder='''qformer_tokenizer''' )
_A = cls._get_arguments_from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
args.append(UpperCAmelCase__ )
return cls(*UpperCAmelCase__ )
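# Usage sketch (the class mirrors transformers' InstructBlipProcessor; the
# checkpoint name is illustrative and loading it needs network access):
#
#   from PIL import Image
#   from transformers import InstructBlipProcessor
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=Image.new("RGB", (224, 224)), text="What is shown?", return_tensors="pt")
#   # `inputs` carries pixel_values, input_ids/attention_mask from the main tokenizer,
#   # and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.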
| 721 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Optional[int] , __A: Union[str, Any] , __A: int=2 , __A: List[str]=True , __A: List[Any]=False , __A: Union[str, Any]=10 , __A: Optional[int]=3 , __A: List[Any]=32 * 4 , __A: Dict=32 * 6 , __A: Optional[Any]=4 , __A: Any=32 , ) -> str:
_A = parent
_A = batch_size
_A = is_training
_A = use_auxiliary_loss
_A = num_queries
_A = num_channels
_A = min_size
_A = max_size
_A = num_labels
_A = mask_feature_size
def __A ( self: Dict ) -> Optional[int]:
_A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__A )
_A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A )
_A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5
).float()
_A = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long()
_A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __A ( self: Optional[Any] ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __A ( self: Dict ) -> Tuple:
_A ,_A ,_A ,_A ,_A = self.prepare_config_and_inputs()
_A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __A ( self: Optional[int] , __A: Union[str, Any] , __A: Dict ) -> int:
_A = output.encoder_hidden_states
_A = output.pixel_decoder_hidden_states
_A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers )
def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Any , __A: Dict=False ) -> Any:
with torch.no_grad():
_A = MaskFormerModel(config=__A )
model.to(__A )
model.eval()
_A = model(pixel_values=__A , pixel_mask=__A )
_A = model(__A , output_hidden_states=__A )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__A , __A )
def __A ( self: Optional[Any] , __A: Union[str, Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Union[str, Any] , __A: List[Any] ) -> int:
_A = MaskFormerForInstanceSegmentation(config=__A )
model.to(__A )
model.eval()
def comm_check_on_output(__A: int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_A = model(pixel_values=__A , pixel_mask=__A )
_A = model(__A )
comm_check_on_output(__A )
_A = model(
pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A )
comm_check_on_output(__A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def __A ( self: int ) -> Tuple:
_A = MaskFormerModelTester(self )
_A = ConfigTester(self , config_class=__A , has_text_modality=__A )
def __A ( self: List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def __A ( self: Optional[Any] ) -> int:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def __A ( self: Dict ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def __A ( self: int ) -> Tuple:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def __A ( self: List[Any] ) -> Any:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def __A ( self: Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def __A ( self: int ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __A ( self: Union[str, Any] ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self: List[Any] ) -> Any:
pass
def __A ( self: Dict ) -> Optional[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
@slow
def __A ( self: int ) -> Optional[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_A = MaskFormerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __A ( self: Optional[Any] ) -> Optional[int]:
_A = (self.model_tester.min_size,) * 2
_A = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__A ),
'''mask_labels''': torch.randn((2, 10, *size) , device=__A ),
'''class_labels''': torch.zeros(2 , 10 , device=__A ).long(),
}
_A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A )
_A = model(**__A )
self.assertTrue(outputs.loss is not None )
def __A ( self: Optional[Any] ) -> List[Any]:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def __A ( self: Any ) -> Tuple:
_A ,_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__A ).to(__A )
_A = model(**__A , output_attentions=__A )
self.assertTrue(outputs.attentions is not None )
def __A ( self: Dict ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = model_class(__A )
model.to(__A )
model.train()
_A = model(__A , mask_labels=__A , class_labels=__A ).loss
loss.backward()
def __A ( self: Tuple ) -> Optional[Any]:
# only MaskFormerForInstanceSegmentation has the loss
_A = self.all_model_classes[1]
_A ,_A ,_A ,_A ,_A = self.model_tester.prepare_config_and_inputs()
_A = True
_A = True
_A = model_class(__A )
model.to(__A )
model.train()
_A = model(__A , mask_labels=__A , class_labels=__A )
_A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def __A ( ):
'''simple docstring'''
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self: Union[str, Any] ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __A ( self: List[Any] ) -> Any:
_A = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
_A = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
_A = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
_A = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) )
def __A ( self: Dict ) -> Dict:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_A = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def __A ( self: List[Any] ) -> Dict:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(__A , return_tensors='''pt''' ).to(__A )
_A = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_A = model(**__A )
# masks_queries_logits
_A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_A = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_A = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
_A = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_A = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def __A ( self: Optional[Any] ) -> str:
_A = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__A )
.eval()
)
_A = self.default_image_processor
_A = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
_A = inputs['''pixel_values'''].to(__A )
_A = [el.to(__A ) for el in inputs['''mask_labels''']]
_A = [el.to(__A ) for el in inputs['''class_labels''']]
with torch.no_grad():
_A = model(**__A )
self.assertTrue(outputs.loss is not None )
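# Inference sketch for the checkpoints exercised above (needs network access;
# the post-processing call follows the released MaskFormerImageProcessor API):
#
#   import torch
#   from PIL import Image
#   from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
#
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#   model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   with torch.no_grad():
#       outputs = model(**processor(image, return_tensors="pt"))
#   panoptic = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]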
| 62 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__A = logging.getLogger(__name__)
__A = 'pytorch_model.bin'
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
A_ = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
A_ = dataclasses.field(
default=__UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
A_ = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
A_ = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
A_ = dataclasses.field(
default=__UpperCAmelCase , metadata={"help": "A csv or a json file containing the validation data."} )
A_ = dataclasses.field(
default=__UpperCAmelCase , metadata={"help": "The name of the task to train on."} , )
A_ = dataclasses.field(
default=__UpperCAmelCase , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
A_ = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
A_ = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
A_ = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
A_ = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
A_ = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
A_ = dataclasses.field(
default=__UpperCAmelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
A_ = dataclasses.field(
default=__UpperCAmelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
A_ = dataclasses.field(
default=__UpperCAmelCase , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
A_ = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
A_ = dataclasses.field(
default=100 , metadata={"help": "Maximum number of self-training iterations."} , )
A_ = dataclasses.field(
default=__UpperCAmelCase , metadata={"help": "Random seed for initialization."} , )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_A = dataset.filter(lambda _lowercase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_A = int(eval_result * len(_lowerCAmelCase ) )
print(_lowerCAmelCase )
_A = dataset.sort('''probability''' , reverse=_lowerCAmelCase )
_A = dataset.select(range(_lowerCAmelCase ) )
_A = dataset.remove_columns(['''label''', '''probability'''] )
_A = dataset.rename_column('''prediction''' , '''label''' )
_A = dataset.map(lambda _lowercase : {"label": idalabel[example["label"]]} )
_A = dataset.shuffle(seed=args.seed )
_A = os.path.join(_lowerCAmelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(_lowerCAmelCase , index=_lowerCAmelCase )
else:
dataset.to_json(_lowerCAmelCase )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , **_lowercase ):
'''simple docstring'''
_A = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_A = STModelArguments(model_name_or_path=_lowerCAmelCase )
_A = STDataArguments(train_file=_lowerCAmelCase , infer_file=_lowerCAmelCase )
_A = STTrainingArguments(output_dir=_lowerCAmelCase )
_A = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_lowerCAmelCase ).items():
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for key, value in kwargs.items():
if hasattr(_lowerCAmelCase , _lowerCAmelCase ):
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Sanity checks
_A = {}
_A = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_A = args.train_file
_A = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_A = args.eval_file
for key in data_files:
_A = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_A = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
_A = f"""{args.output_dir}/self-train_iter-{{}}""".format
_A = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_lowerCAmelCase )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
accelerator.wait_for_everyone()
_A = None
_A = None
_A = 0
_A = False
# Show the progress bar
_A = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_A = data_dir_format(_lowerCAmelCase )
assert os.path.exists(_lowerCAmelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_A = os.path.join(_lowerCAmelCase , '''stage-1''' )
_A = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_lowerCAmelCase , _lowerCAmelCase ):
arguments_dict.update({key: value} )
_A = os.path.join(_lowerCAmelCase , '''best-checkpoint''' , _lowerCAmelCase )
if os.path.exists(_lowerCAmelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , _lowerCAmelCase , _lowerCAmelCase , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , _lowerCAmelCase )
finetune(**_lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(_lowerCAmelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , _lowerCAmelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_A = os.path.join(_lowerCAmelCase , '''best-checkpoint''' )
_A = os.path.join(_lowerCAmelCase , '''stage-2''' )
# Update arguments_dict
_A = model_path
_A = data_files['''train''']
_A = current_output_dir
_A = os.path.join(_lowerCAmelCase , '''best-checkpoint''' , _lowerCAmelCase )
if os.path.exists(_lowerCAmelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , _lowerCAmelCase , _lowerCAmelCase , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , _lowerCAmelCase )
finetune(**_lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(_lowerCAmelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , _lowerCAmelCase )
_A = iteration
_A = data_dir_format(iteration + 1 )
_A = AutoConfig.from_pretrained(os.path.join(_lowerCAmelCase , '''best-checkpoint''' ) )
_A = config.idalabel
_A = os.path.join(_lowerCAmelCase , '''eval_results_best-checkpoint.json''' )
_A = os.path.join(_lowerCAmelCase , '''test_results_best-checkpoint.json''' )
assert os.path.exists(_lowerCAmelCase )
with open(_lowerCAmelCase , '''r''' ) as f:
_A = float(json.load(_lowerCAmelCase )[args.eval_metric] )
_A = os.path.join(_lowerCAmelCase , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(_lowerCAmelCase )
# Loading the dataset from local csv or json files.
_A = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
_A = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
shutil.copy(_lowerCAmelCase , os.path.join(_lowerCAmelCase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(_lowerCAmelCase ):
shutil.copy(_lowerCAmelCase , os.path.join(_lowerCAmelCase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
accelerator.wait_for_everyone()
_A = os.path.join(_lowerCAmelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_A = eval_result
if best_iteration is None:
_A = new_iteration
_A = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_A = new_iteration
_A = new_eval_result
_A = 0
else:
if new_eval_result == best_eval_result:
_A = new_iteration
_A = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_A = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , _lowerCAmelCase )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowerCAmelCase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(_lowerCAmelCase , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowerCAmelCase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(_lowerCAmelCase , '''eval_results_best-iteration.json''' ) , )
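# Invocation sketch for the self-training entry point defined above (shown with
# its upstream-style name `selftrain`; file paths are illustrative):
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/infer.csv",
#       output_dir="outputs/self-training",
#       eval_file="data/eval.csv",
#       evaluation_strategy="epoch",
#       max_selftrain_iterations=3,
#   )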
| 700 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: int , __A: Optional[int] , __A: Optional[Any] ) -> str:
_A = question_encoder
_A = generator
_A = self.question_encoder
def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict:
if os.path.isfile(__A ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__A , exist_ok=__A )
_A = os.path.join(__A , '''question_encoder_tokenizer''' )
_A = os.path.join(__A , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__A )
self.generator.save_pretrained(__A )
@classmethod
def __A ( cls: Optional[Any] , __A: List[str] , **__A: int ) -> Any:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_A = kwargs.pop('''config''' , __A )
if config is None:
_A = RagConfig.from_pretrained(__A )
_A = AutoTokenizer.from_pretrained(
__A , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
_A = AutoTokenizer.from_pretrained(
__A , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__A , generator=__A )
def __call__( self: int , *__A: Optional[int] , **__A: List[str] ) -> int:
return self.current_tokenizer(*__A , **__A )
def __A ( self: Dict , *__A: List[str] , **__A: List[str] ) -> Dict:
return self.generator.batch_decode(*__A , **__A )
def __A ( self: Union[str, Any] , *__A: Tuple , **__A: List[str] ) -> Tuple:
return self.generator.decode(*__A , **__A )
def __A ( self: Dict ) -> List[str]:
_A = self.question_encoder
def __A ( self: Union[str, Any] ) -> int:
_A = self.generator
def __A ( self: Dict , __A: List[str] , __A: Optional[List[str]] = None , __A: Optional[int] = None , __A: Optional[int] = None , __A: str = "longest" , __A: str = None , __A: bool = True , **__A: Tuple , ) -> BatchEncoding:
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __A , )
if max_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
__A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_A = self.current_tokenizer.model_max_length
_A = self(
text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , )
_A = labels['''input_ids''']
return model_inputs
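# Usage sketch (mirrors transformers.RagTokenizer; loading the checkpoint
# needs network access):
#
#   from transformers import RagTokenizer
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#   decoded = tokenizer.batch_decode(batch["input_ids"], skip_special_tokens=True)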
| 62 | 0 |
# Imports
import numpy as np
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: int , __A: Any=None , __A: Dict=None , __A: Dict=None , __A: Any=None , __A: Any=None ) -> Optional[int]:
self.set_matricies(red=__lowerCamelCase , green=__lowerCamelCase , blue=__lowerCamelCase , red_edge=__lowerCamelCase , nir=__lowerCamelCase )
def __A ( self: str , __A: List[Any]=None , __A: Optional[Any]=None , __A: Tuple=None , __A: Optional[int]=None , __A: Optional[Any]=None ) -> List[str]:
if red is not None:
_A = red
if green is not None:
_A = green
if blue is not None:
_A = blue
if red_edge is not None:
_A = red_edge
if nir is not None:
_A = nir
return True
def __A ( self: Union[str, Any] , __A: List[Any]="" , __A: int=None , __A: int=None , __A: Tuple=None , __A: int=None , __A: Dict=None ) -> Optional[int]:
self.set_matricies(red=__lowerCamelCase , green=__lowerCamelCase , blue=__lowerCamelCase , red_edge=__lowerCamelCase , nir=__lowerCamelCase )
_A = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def __A ( self: Dict ) -> int:
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __A ( self: Optional[Any] ) -> Dict:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __A ( self: List[Any] ) -> str:
return self.nir * (self.red / (self.green**2))
def __A ( self: List[Any] ) -> Union[str, Any]:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __A ( self: int ) -> int:
return (self.nir - self.red) / (self.nir + self.red)
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
return (self.nir - self.blue) / (self.nir + self.blue)
def __A ( self: List[Any] ) -> str:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __A ( self: Dict ) -> List[str]:
return (self.nir - self.green) / (self.nir + self.green)
def __A ( self: Tuple ) -> Any:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __A ( self: List[str] ) -> Optional[Any]:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __A ( self: Dict ) -> Optional[int]:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __A ( self: Tuple ) -> int:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __A ( self: Union[str, Any] , __A: Dict=0.08 , __A: List[Any]=1.22 , __A: Optional[int]=0.03 ) -> Union[str, Any]:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __A ( self: Optional[Any] ) -> Union[str, Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __A ( self: Optional[int] ) -> Any:
return (self.nir / self.green) - 1
def __A ( self: str ) -> int:
return (self.nir / self.redEdge) - 1
def __A ( self: Optional[int] ) -> List[str]:
return (self.red - self.blue) / self.red
def __A ( self: List[str] ) -> Any:
_A = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __A ( self: List[Any] ) -> Optional[int]:
return self.nir - self.green
def __A ( self: str ) -> Optional[Any]:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __A ( self: Optional[int] ) -> Tuple:
_A = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def __A ( self: str , __A: List[Any]=0.16 ) -> Union[str, Any]:
return (self.nir - self.green) / (self.nir + self.green + y)
def __A ( self: str , __A: str=0.5 ) -> str:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __A ( self: Optional[Any] ) -> Optional[int]:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def __A ( self: Tuple , __A: Union[str, Any]=None , __A: List[Any]=None ) -> str:
return (self.nir - b) / (a * self.red)
def __A ( self: Any ) -> str:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __A ( self: Any ) -> Tuple:
return (self.red + self.green + self.blue) / 30.5
def __A ( self: List[Any] ) -> Dict:
return self.nir / self.red
def __A ( self: Union[str, Any] ) -> List[Any]:
return (self.rvi() - 1) / (self.rvi() + 1)
def __A ( self: List[str] ) -> List[Any]:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __A ( self: int ) -> Optional[Any]:
return self.green / (self.nir + self.red + self.green)
def __A ( self: Optional[int] ) -> str:
return self.nir / (self.nir + self.red + self.green)
def __A ( self: Any ) -> int:
return self.red / (self.nir + self.red + self.green)
def __A ( self: Dict ) -> str:
return (self.green - self.red) / (self.green + self.red)
def __A ( self: List[str] ) -> Dict:
return (self.red - self.green) / (self.red + self.green)
def __A ( self: Optional[int] ) -> Optional[Any]:
_A = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
_A = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __A ( self: Dict ) -> Optional[int]:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __A ( self: Optional[int] ) -> Optional[int]:
return self.nir / self.red
def __A ( self: Union[str, Any] ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def __A ( self: Optional[int] ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
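# Usage sketch, assuming the un-obfuscated names `IndexCalculation` for the
# class above and `calculation` for its dispatch method:
#
#   import numpy as np
#
#   red = np.array([[0.20, 0.30], [0.25, 0.35]])
#   nir = np.array([[0.60, 0.70], [0.65, 0.75]])
#   calc = IndexCalculation(red=red, nir=nir)
#   ndvi = calc.calculation("NDVI")  # element-wise (nir - red) / (nir + red)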
| 701 |
from __future__ import annotations
def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary search: smallest index in v[l+1 .. r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of v, in O(n log n)."""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh shortest candidate
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest candidate found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens an existing candidate: replace its ceiling element
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
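# Usage sketch:
#
#   longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])  # -> 6
#   # one longest strictly increasing subsequence is [2, 3, 7, 8, 10, 13]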
| 62 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __A ( _lowercase ):
'''simple docstring'''
_A = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
_A ,_A = emb.weight.shape
_A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
_A = emb.weight.data
return lin_layer
def __A ( _lowercase ):
'''simple docstring'''
_A = torch.load(UpperCamelCase__ , map_location='''cpu''' )
_A = Namespace(**checkpoint['''cfg''']['''model'''] )
_A = checkpoint['''model''']
remove_ignore_keys_(UpperCamelCase__ )
_A = state_dict['''decoder.embed_tokens.weight'''].shape[0]
_A = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
_A = XGLMConfig(
vocab_size=UpperCamelCase__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_A = XGLMForCausalLM(UpperCamelCase__ )
_A = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
print(UpperCamelCase__ )
_A = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__A = parser.parse_args()
__A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
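# Example invocation (the script and path names are illustrative):
#
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-hf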
| 702 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 62 | 0 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """Schur complement of the block matrix [[A, B], [B^T, C]] with respect to A."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
"""simple docstring"""
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        # determinant identity: det([[A, B], [B^T, C]]) = det(A) * det(S)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)  # rows of A and B deliberately mismatched

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
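# A tiny worked example of schur_complement as defined above, with
# A = 4*I, B = [[1], [2]], C = [[3]]:  S = C - B^T A^{-1} B = 3 - (1 + 4)/4 = 1.75.
# print(schur_complement(np.array([[4.0, 0.0], [0.0, 4.0]]),
#                        np.array([[1.0], [2.0]]),
#                        np.array([[3.0]])))  # [[1.75]]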
| 703 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
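    # Alternatively, inspect the scraped ratings directly (network-dependent, and
    # brittle if IMDb changes its markup):
    # movies = get_imdb_top_250_movies()
    # print(list(movies.items())[:3])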
| 62 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Output of a scheduler step: the denoised sample for the previous timestep."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin with the save/load helpers shared by all Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    """Right-pad x with singleton dims, then broadcast it to `shape`."""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Discretize the squared-cosine alpha-bar schedule into per-step betas."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas

        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
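# Quick sanity sketch for the beta helpers above (left commented out since this
# is a library module; only jnp from this file is assumed):
# betas = betas_for_alpha_bar(10)  # 10-step squaredcos_cap_v2 schedule
# alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)
# assert betas.shape == (10,) and 0.0 < float(alphas_cumprod[-1]) < 1.0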
| 704 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 62 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
"""simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
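# A brief usage sketch for the classes reconstructed above (commented out since
# configuration modules have no side effects at import time):
# config = RobertaConfig(vocab_size=50265, hidden_size=768)
# print(config.num_hidden_layers, config.max_position_embeddings)  # 12 512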
| 62 | 0 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy'."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
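    # Example, using zeller as reconstructed above:
    # zeller("01-31-2010") -> "Your date 01-31-2010, is a Sunday!"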
| 706 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
"""simple docstring"""
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
def __A ( self: Any , __A: Dict=None , __A: Tuple=None , __A: List[Any]=None , __A: str = "eval" ) -> int:
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(__A )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_A = self.post_process_function(__A , __A , output.predictions )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
self.log(__A )
else:
_A = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , __A )
return metrics
def __A ( self: Union[str, Any] , __A: Optional[int] , __A: int , __A: List[Any]=None , __A: str = "test" ) -> Union[str, Any]:
_A = self.get_test_dataloader(__A )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_A = eval_loop(
__A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
_A = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(__A , __A , output.predictions , '''predict''' )
_A = self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
_A = metrics.pop(__A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A )
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
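# Hypothetical end-to-end driver sketch. calibrate and save_onnx are the methods
# reconstructed above; `trainer` and the evaluate step are assumptions, since the
# remaining identifiers in this file are obfuscated:
# trainer.calibrate()
# metrics = trainer.evaluate()  # assumed evaluation entry point
# trainer.save_onnx("./quantized_model")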
| 62 | 0 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
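    # A tiny sanity check for largest_product, independent of grid.txt:
    # the best run in this 4x4 grid is the bottom row, 13 * 14 * 15 * 16 = 43680.
    print(largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]))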
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages (one string per language)."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
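# A short demonstration of encode_example, using the classes reconstructed above
# (commented out: this module uses relative imports and is not run directly):
# feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
# feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
# -> {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}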
| 708 |
import itertools
import string
from collections.abc import Generator, Iterable
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = iter(_lowercase )
while True:
_A = tuple(itertools.islice(_lowercase , _lowercase ) )
if not chunk:
return
yield chunk
def __A ( _lowercase ):
'''simple docstring'''
_A = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
_A = ''''''
if len(_lowercase ) < 2:
return dirty
for i in range(len(_lowercase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(_lowercase ) & 1:
clean += "X"
return clean
def __A ( _lowercase ):
'''simple docstring'''
_A = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_A = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(_lowercase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(_lowercase )
return table
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = generate_table(_lowercase )
_A = prepare_input(_lowercase )
_A = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowercase , 2 ):
_A ,_A = divmod(table.index(_lowercase ) , 5 )
_A ,_A = divmod(table.index(_lowercase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = generate_table(_lowercase )
_A = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowercase , 2 ):
_A ,_A = divmod(table.index(_lowercase ) , 5 )
_A ,_A = divmod(table.index(_lowercase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 62 | 0 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to cover all travel days with 1-day, 7-day, and 30-day passes."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
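    # Worked example, using mincost_tickets as defined above: travel days
    # [1, 4, 6, 7, 8, 20] with costs [2, 7, 15] -> a 1-day pass on day 1, a 7-day
    # pass covering days 4-8, and a 1-day pass on day 20: total 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11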
| 709 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 62 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def __A ( _lowercase , _lowercase=False ):
'''simple docstring'''
_A = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('''head''' ):
_A = '''segformer.encoder.''' + key
if key.startswith('''backbone''' ):
_A = key.replace('''backbone''' , '''segformer.encoder''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_A = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
_A = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(_lowercase )-1}""" )
if "norm" in key:
_A = key.replace('''norm''' , '''layer_norm''' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_A = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
_A = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(_lowercase )-1}""" )
if "layer_norm1" in key:
_A = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
_A = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
_A = key[key.find('''block''' ) + len('''block''' )]
_A = key.replace(f"""block{idx}""" , f"""block.{int(_lowercase )-1}""" )
if "attn.q" in key:
_A = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
_A = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
_A = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
_A = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
_A = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
_A = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
_A = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
_A = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_A = key[key.find('''linear_c''' ) + len('''linear_c''' )]
_A = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(_lowercase )-1}""" )
if key.startswith('''head''' ):
_A = key.replace('''head''' , '''classifier''' )
_A = value
return new_state_dict
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_A = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_A = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_A = kv_weight[
: config.hidden_sizes[i], :
]
_A = kv_bias[: config.hidden_sizes[i]]
_A = kv_weight[
config.hidden_sizes[i] :, :
]
_A = kv_bias[
config.hidden_sizes[i] :
]
def prepare_img():
    # standard COCO image used across the conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = SegformerConfig()
_A = False
# set attributes based on model_name
_A = '''huggingface/label-files'''
if "segformer" in model_name:
_A = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
if "ade" in model_name:
_A = 1_50
_A = '''ade20k-id2label.json'''
_A = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
_A = 19
_A = '''cityscapes-id2label.json'''
_A = (1, 19, 1_28, 1_28)
else:
raise ValueError(f"""Model {model_name} not supported""" )
elif "mit" in model_name:
_A = True
_A = model_name[4:6]
_A = 10_00
_A = '''imagenet-1k-id2label.json'''
_A = (1, 10_00)
else:
raise ValueError(f"""Model {model_name} not supported""" )
# set config attributes
_A = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
_A = {int(_lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_A = [64, 1_28, 3_20, 5_12]
_A = 2_56
elif size == "b2":
_A = [64, 1_28, 3_20, 5_12]
_A = 7_68
_A = [3, 4, 6, 3]
elif size == "b3":
_A = [64, 1_28, 3_20, 5_12]
_A = 7_68
_A = [3, 4, 18, 3]
elif size == "b4":
_A = [64, 1_28, 3_20, 5_12]
_A = 7_68
_A = [3, 8, 27, 3]
elif size == "b5":
_A = [64, 1_28, 3_20, 5_12]
_A = 7_68
_A = [3, 6, 40, 3]
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor (only resize + normalize)
_A = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_lowercase , align=_lowercase , do_random_crop=_lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=_lowercase , return_tensors='''pt''' ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
_A = torch.load(_lowercase , map_location=torch.device('''cpu''' ) )
else:
_A = torch.load(_lowercase , map_location=torch.device('''cpu''' ) )['''state_dict''']
# rename keys
_A = rename_keys(_lowercase , encoder_only=_lowercase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_lowercase , _lowercase )
# create HuggingFace model and load state dict
if encoder_only:
_A = False
_A = SegformerForImageClassification(_lowercase )
else:
_A = SegformerForSemanticSegmentation(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# forward pass
_A = model(_lowercase )
_A = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_A = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_A = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_A = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_A = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_A = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_A = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_A = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_A = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_A = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_A = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_A = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_A = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_A = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_A = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_A = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits against the expected slice for this checkpoint
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
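# A sketch of how this converter would be invoked; the script and file names
# below are assumptions for illustration, not taken from the source:
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x1024.city.160k \
#       --checkpoint_path ./segformer.b0.512x1024.city.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-city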
# Kosaraju's algorithm for strongly connected components (SCCs),
# with two small test graphs given as adjacency lists.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited):
    """Return the vertices reachable from ``vert`` in DFS post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph, vert, visited):
    """Collect every vertex reachable from ``vert`` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph):
    """Return the SCCs of ``graph`` via Kosaraju's two-pass DFS."""
    visited = len(graph) * [False]
    # build the transpose (reversed) graph
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    # first pass: record vertices in order of finishing time
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    # second pass: peel components off the reversed graph,
    # processing vertices in reverse finishing order
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
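# A minimal sanity check (an editor's addition, not part of the original file):
# in test_graph_1 the cycle 0 -> 2 -> 1 -> 0 forms one component while
# vertices 3 and 4 are singletons.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]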
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield the .py/.ipynb files under ``top_dir``, skipping scripts and hidden dirs."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk does not descend into skipped directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i: int) -> str:
    """Markdown bullet at indent level ``i``; level 0 starts a new ## heading."""
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    """Print headings for the path components that differ from the previous path."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    """Print a markdown index of every good file path under ``top_dir``."""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
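# For a hypothetical tree containing sorts/merge_sort.py and sorts/quick_sort.py
# (names assumed for illustration), the generated index would look roughly like:
#
#   ## Sorts
#     * [Merge Sort](sorts/merge_sort.py)
#     * [Quick Sort](sorts/quick_sort.py)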
def mf_knapsack(i, wt, val, j):
    """Memory-function (top-down) 0/1 knapsack using the global memo table ``f``."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
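# The recurrence implemented above, stated explicitly:
#   dp[i][w_] = max(dp[i-1][w_], val[i-1] + dp[i-1][w_ - wt[i-1]])  if wt[i-1] <= w_
#   dp[i][w_] = dp[i-1][w_]                                         otherwise
# i.e. item i is either left out or taken, whichever yields more value.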
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Walk the DP table backwards, adding item ``i`` whenever taking it changed the optimum."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
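# Note (editor's addition): mf_knapsack memoises into the module-level table
# ``f``, which must be re-initialised (as above, with -1 marking "unknown")
# before solving a new (wt, val, w) instance, or stale entries will be reused.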
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h_n = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
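# The sequence starts 0, 1, 6, 15, 28, ... so the first call above prints
# [0, 1, 6, 15, 28].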
def solution(n: int = 1_000_000) -> int:
    """Project Euler 14: find the start below ``n`` with the longest Collatz chain.

    Chain lengths are memoised in ``counters`` so shared suffixes are only
    walked once.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
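# Worked example: starting at 13 the chain is
#   13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
# which has 10 terms, so counters[13] ends up as 10.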