def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
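# To run just this test file (assuming the usual layout of the transformers repo,
# where this module lives under tests/models/led/), something like:
#   python -m pytest tests/models/led/test_tokenization_led.py -q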
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
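# A minimal usage sketch for the processor above; assumed, not from the original
# file. It presumes an installed `transformers` (the relative imports above only
# resolve inside the package), Pillow, a local "document.png", and the Tesseract
# binary since apply_ocr defaults to True.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv2ImageProcessor

    processor = LayoutLMv2ImageProcessor(size={"height": 224, "width": 224})
    encoding = processor.preprocess(Image.open("document.png"), return_tensors="np")
    # pixel_values is (batch, channels, height, width); OCR adds words and boxes
    print(encoding["pixel_values"].shape)
    print(encoding["words"][0][:5], encoding["boxes"][0][:5])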
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb

from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """Wraps the CLIP preprocessing so that gradients can flow through the image transforms."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a fixed vector to the latent of the current image and decode it."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *unused = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
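# A minimal usage sketch for the processor above; assumed, not from the original
# file. "OFA-Sys/chinese-clip-vit-base-patch16" is a public checkpoint; any PIL
# image works in place of the blank one.
if __name__ == "__main__":
    from PIL import Image
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    inputs = processor(text=["一只猫"], images=Image.new("RGB", (224, 224)), return_tensors="pt", padding=True)
    # text goes through the BERT tokenizer, the image through the image processor
    print(sorted(inputs.keys()))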
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler

from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
    Adafactor,
    AdamW,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available


if is_fairscale_available():
    from fairscale.optim import OSS


logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
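# A minimal sketch of the `arg_to_scheduler` mapping above; assumed, not from the
# original file. It builds a named schedule for a toy optimizer.
if __name__ == "__main__":
    model = nn.Linear(4, 4)
    optimizer = AdamW(model.parameters(), lr=3e-4)
    scheduler = arg_to_scheduler["linear"](optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(3):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())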
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
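# A minimal usage sketch for the tokenizer above; assumed, not from the original
# file. It presumes the CLIP tokenizer files from "openai/clip-vit-base-patch32"
# are reachable.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
    # "<cat-toy>" expands to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2" before encoding
    print(tokenizer("a photo of <cat-toy>")["input_ids"])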
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
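# A minimal usage sketch for the processor above; assumed, not from the original
# file. "CIDAS/clipseg-rd64-refined" is a public CLIPSeg checkpoint; the visual
# prompt path yields conditional_pixel_values instead of tokenized text.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.new("RGB", (352, 352))
    prompt = Image.new("RGB", (352, 352))
    inputs = processor(images=[image], visual_prompt=[prompt], return_tensors="pt")
    print(sorted(inputs.keys()))  # conditional_pixel_values, pixel_values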
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
_snake_case = 5_0000
_snake_case = 5000
_snake_case , _snake_case = os.path.split(__file__)
_snake_case = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for i in range(UpperCamelCase__ ):
_a : Any = dataset[i]
@get_duration
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ):
_a : Any = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
with dataset.formatted_as(type=UpperCamelCase__ ):
for i in range(UpperCamelCase__ ):
_a : List[Any] = dataset[i]
@get_duration
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
with dataset.formatted_as(type=UpperCamelCase__ ):
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
_a : List[str] = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Optional[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
_a : List[str] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
]
_a : Optional[Any] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
_a : str = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
_a : int = generate_example_dataset(
os.path.join(UpperCamelCase__ , """dataset.arrow""" ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={"""list""": (1_0_0,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(UpperCamelCase__ ) )
_a : Tuple = func(UpperCamelCase__ , **UpperCamelCase__ )
print("""shuffling dataset""" )
_a : str = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(UpperCamelCase__ ) )
_a : str = func(
UpperCamelCase__ , **UpperCamelCase__ )
with open(UpperCamelCase__ , """wb""" ) as f:
f.write(json.dumps(UpperCamelCase__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import re


def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
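# A few illustrative checks; assumed inputs chosen to exercise the regex above.
if __name__ == "__main__":
    assert indian_phone_validator("+91 9876543210")  # country code plus a space
    assert indian_phone_validator("9876543210")  # bare 10 digits starting 7/8/9
    assert not indian_phone_validator("12345")  # too short, wrong leading digit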
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Return the indentation prefix of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Add the current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add the final block after `end_prompt` if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so that comparisons ignore case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort: constants first, then classes, then functions."""

    # If no key is provided, we use the identity.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
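# A minimal sanity check of the ordering rules (an illustrative addition, not part of
# the original script): constants sort first, then classes, then functions.
assert sort_objects(["load_model", "CONFIG_MAP", "AutoModel"]) == ["CONFIG_MAP", "AutoModel", "load_model"]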
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sorts imports between brackets.
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` imports in `file`; if `check_only=True`, just report whether they are sorted."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore the beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reordering the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only: bool = True):
    """Run `sort_imports` on every `__init__.py` under `PATH_TO_TRANSFORMERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
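# Illustrative usage (an addition, not in the original file; the script name below is
# hypothetical):
#     python custom_init_isort.py --check_only   # raise if any __init__.py is unsorted
#     python custom_init_isort.py                # rewrite unsorted files in place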
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
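# Illustrative CLI usage via `fire` (an addition; the tokenizer name and data directory
# below are hypothetical examples):
#     python save_len_file.py --tokenizer_name facebook/bart-base --data_dir ./xsum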
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """Implement the hyperbolic tangent via its closed form tanh(x) = (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
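    # Quick numeric sanity check (an illustrative addition): the closed form above
    # agrees with NumPy's built-in tanh.
    sample = np.array([-1.0, 0.0, 1.0])
    print(np.allclose(tangent_hyperbolic(sample), np.tanh(sample)))  # True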
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4_735, 544, 246, 963, 870, 762, 239, 244, 40_477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file `f` in `src_dir` to `dest_dir/f`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
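# Illustrative CLI usage via `fire` (an addition; the script name and paths are
# hypothetical examples):
#     python minify_dataset.py ./wmt_full ./wmt_mini 100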
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration options for the download manager."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in `num.txt`."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Calculate the Lorentz coefficient beta = v / c for a given velocity."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the matrix of a Lorentz boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
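# Sanity check (an illustrative addition, not in the original script): a Lorentz boost
# must preserve the Minkowski metric eta, i.e. L^T @ eta @ L == eta.
_eta = np.diag([1.0, -1.0, -1.0, -1.0])
_boost = transformation_matrix(0.5 * c)
assert np.allclose(_boost.T @ _eta @ _boost, _eta)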
def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the Lorentz boost to a four-vector; defaults to the symbolic (ct, x, y, z)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1_024,
'''facebook/bart-large''': 1_024,
'''facebook/bart-large-mnli''': 1_024,
'''facebook/bart-large-cnn''': 1_024,
'''facebook/bart-large-xsum''': 1_024,
'''yjernite/bart_eli5''': 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
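# Illustrative usage (an addition, not part of the original module; requires network
# access to download the pretrained files):
#
#     tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     encoding = tokenizer("Hello world", return_tensors="np")
#     print(encoding["input_ids"])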
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
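# Illustrative behaviour of the ONNX input spec above (an addition, not part of the
# original module): for the default task both inputs use dynamic batch/sequence axes.
#
#     onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig())
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])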
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
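# Illustrative usage (an addition, not part of the original module):
#
#     config = FNetConfig(num_hidden_layers=2, hidden_size=128)
#     print(config.model_type, config.num_hidden_layers)  # fnet 2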
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
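# Illustrative usage (an addition, not part of the original module):
#
#     config = MobileNetV2Config(depth_multiplier=0.75)
#     print(config.model_type, config.depth_multiplier)  # mobilenet_v2 0.75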
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
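    # Worked example of the arithmetic above (an illustrative addition): with
    # NUM_SHARDS = 4 and NUM_ITEMS_PER_SHARD = 3 the full dataset has 12 items, so with
    # world_size = 3 every rank expects 12 // 3 = 4 items, while with world_size = 5
    # ranks 0 and 1 expect 3 items (one extra from the remainder) and ranks 2-4 expect 2.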
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summarization(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: str
__magic_name__: List[str]
__magic_name__: Optional[List[str]]
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: List[int]
__magic_name__: List[int]
__magic_name__: Optional[List[int]] = None
__magic_name__: Optional[List[int]] = None
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
__magic_name__: Union[str, Any] = "train"
__magic_name__: Dict = "dev"
__magic_name__: List[Any] = "test"
class SCREAMING_SNAKE_CASE_ :
@staticmethod
def UpperCAmelCase_ ( _A : int , _A : Union[str, Any] ) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def UpperCAmelCase_ ( _A : List[str] ) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def UpperCAmelCase_ ( _A : Tuple , _A : str , _A : int , _A : List[str] , _A : Any=False , _A : List[str]="[CLS]" , _A : List[Any]=1 , _A : Dict="[SEP]" , _A : List[Any]=False , _A : Optional[int]=False , _A : Tuple=0 , _A : Any=0 , _A : Optional[Any]=-100 , _A : str=0 , _A : Optional[Any]=True , ) -> List[InputFeatures]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {label: i for i, label in enumerate(_a )}
snake_case_ : Optional[Any] = []
for ex_index, example in enumerate(_a ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' , _a , len(_a ) )
snake_case_ : List[str] = []
snake_case_ : int = []
for word, label in zip(example.words , example.labels ):
snake_case_ : List[Any] = tokenizer.tokenize(_a )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_a ) > 0:
tokens.extend(_a )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_a ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
snake_case_ : Optional[int] = tokenizer.num_special_tokens_to_add()
if len(_a ) > max_seq_length - special_tokens_count:
snake_case_ : str = tokens[: (max_seq_length - special_tokens_count)]
snake_case_ : int = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
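# A minimal sketch (not part of the original module) of how a concrete task can
# plug into the classes above. It assumes a CoNLL-style file per split named
# "<mode>.txt" with one "word label" pair per line and blank lines between
# sentences; the class name, the file layout, and the label set are
# illustrative assumptions, not the original implementation.
class NER(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        examples = []
        words: List[str] = []
        labels: List[str] = []
        with open(os.path.join(data_dir, f"{mode}.txt"), encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:  # a blank line ends the current sentence
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{len(examples)}", words=words, labels=labels))
                        words, labels = [], []
                else:
                    parts = line.split()
                    words.append(parts[0])
                    labels.append(parts[-1])
        if words:  # flush the last sentence if the file has no trailing blank line
            examples.append(InputExample(guid=f"{mode}-{len(examples)}", words=words, labels=labels))
        return examples

    @staticmethod
    def get_labels(path: str) -> List[str]:
        # A common fixed CoNLL-2003-style label set (an assumption for this sketch).
        return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]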
| 327
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 235
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
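# Example usage (a sketch, not part of the original file): the defaults above
# reproduce the base TimeSformer setup; any field can be overridden at
# construction time.
#
#   config = TimesformerConfig(num_frames=16)
#   assert config.num_frames == 16 and config.attention_type == "divided_space_time"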
| 368
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 75
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
__lowercase : List[Any] = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 249
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # backtrack: undo the tentative placement
    return None
def print_solution(grid: Matrix) -> None:
for row in grid:
for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 212
| 0
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Renames PT weight names to corresponding Flax weight names and reshapes the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
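# Example usage (a sketch, not part of the original file; `pt_model` and
# `flax_model` stand for a matching pair of models, e.g. UNet2DConditionModel
# and its Flax counterpart):
#
#   pt_state_dict = pt_model.state_dict()
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
#   # `flax_params` is the unflattened nested dict expected as `params` by `flax_model`.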
| 352
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
for item in rename_keys:
if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
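# Example invocation (the script filename is an assumption; the arguments match
# the parser defined above):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model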
| 84
| 0
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 30
|
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
UpperCAmelCase = """base_with_context"""
def load_notes_encoder(weights, model):
    # NOTE: the assignment targets below assume the T5-style module layout of
    # SpectrogramNotesEncoder; the original targets were lost in processing.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    # NOTE: the assignment targets below assume the same T5-style layout as
    # load_notes_encoder; the original targets were lost in processing.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # NOTE: the assignment targets below assume the FiLM/T5-style layout of the
    # decoder (self-attention, cross-attention, FiLM-conditioned MLP per layer);
    # the original targets were lost in processing.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )
        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
| 256
| 0
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string must be an int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # prepend the BWT column and re-sort; after len(bwt_string) passes the
        # list holds every rotation of the original string in sorted order
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
)
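# Worked example for the functions above:
#
#   >>> bwt_transform("banana")
#   {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
#   >>> reverse_bwt("nnbaaa", 3)
#   'banana'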
| 19
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, backed by a plain vocab.json."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 19
| 1
|
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
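# Worked example for the three functions above:
#
#   >>> encrypt("Hello, World!", 3)
#   'Hoo!el,Wrdl l'
#   >>> decrypt("Hoo!el,Wrdl l", 3)
#   'Hello, World!'
#   >>> bruteforce("Hoo!el,Wrdl l")[3]
#   'Hello, World!'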
| 259
|
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 259
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
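# Example usage (a sketch, not part of the original file; assumes `waveform`
# holds 16 kHz mono audio samples, the input format WhisperProcessor expects):
#
#   tool = SpeechToTextTool()
#   transcript = tool(waveform)  # model/processor setup happens lazily on first call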
| 369
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, special_strings
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 62
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into the VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 334
| 0
|
"""Minimax on a perfect binary game tree stored as a flat list of leaf scores."""
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
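
# Worked example: for scores [3, 5, 2, 9] (a full binary tree of height
# log2(4) = 2), the maximizer picks max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
#
#     scores = [3, 5, 2, 9]
#     height = math.log(len(scores), 2)
#     assert minimax(0, 0, True, scores, height) == 3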
| 366
|
"""Fetch the top stories from Hacker News via the public Firebase API."""
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 48
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 115
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 115
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 2
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModel(config=_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTForMaskedImageModeling(config=_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = TFDeiTForMaskedImageModeling(_A )
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = TFDeiTForImageClassification(_A )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = TFDeiTForImageClassification(_A )
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_a : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_a : Optional[Any] = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_a : str = False
_a : str = False
_a : List[str] = False
_a : Optional[int] = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Dense ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ):
"""simple docstring"""
__lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFDeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_A , return_tensors="tf" )
# forward pass
__lowerCAmelCase = model(**_A )
# verify the logits
__lowerCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
__lowerCAmelCase = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 102
|
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 102
| 1
|
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if mass < 0:
raise ValueError("The mass of a body cannot be negative" )
return 0.5 * mass * abs(SCREAMING_SNAKE_CASE ) * abs(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
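
# Worked example: a 10 kg mass moving at 5 m/s carries
# 0.5 * 10 * 5**2 = 125 J of kinetic energy.
#
#     assert kinetic_energy(10, 5) == 125.0
#     assert kinetic_energy(10, -5) == 125.0  # speed is squared, so sign is irrelevant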
| 108
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Pure-Python SHA-1, kept byte-for-byte compatible with hashlib.sha1."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
import doctest
doctest.testmod()
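
# Usage sketch: the pure-Python class should match hashlib for any bytestring.
#
#     message = b"The quick brown fox jumps over the lazy dog"
#     assert SHA1Hash(message).final_hash() == hashlib.sha1(message).hexdigest()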
| 212
| 0
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 369
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A : List[str] = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets."
    )
    parser.add_argument("--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument("--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.")
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument("--limit", default=None, type=int, help="Limit the number of shards (used for debugging).")
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 49
| 0
|
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
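
# A minimal sketch of what this dummy placeholder does: constructing the class
# without the `torch` and `torchsde` backends installed raises an ImportError
# that names the missing packages (behavior provided by `requires_backends`).
#
#     try:
#         DPMSolverSDEScheduler()
#     except ImportError as err:
#         print(err)  # explains which backends are missing and how to install them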
| 192
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Cleans one table-of-content section: removes duplicates and sorts entries alphabetically by title."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 192
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings, avoiding
    the whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self :str , lowercase_ :List[Any] , lowercase_ :Optional[int] , lowercase_ :Dict="replace" , lowercase_ :int="<s>" , lowercase_ :int="</s>" , lowercase_ :str="</s>" , lowercase_ :Any="<s>" , lowercase_ :str="<unk>" , lowercase_ :Optional[int]="<pad>" , lowercase_ :Dict="<mask>" , lowercase_ :Union[str, Any]=False , **lowercase_ :Optional[int] , ) -> List[Any]:
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else bos_token
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else eos_token
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else sep_token
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else cls_token
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else unk_token
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding='utf-8' ) as vocab_handle:
UpperCAmelCase = json.load(lowercase_ )
UpperCAmelCase = {v: k for k, v in self.encoder.items()}
UpperCAmelCase = errors # how to handle errors in decoding
UpperCAmelCase = bytes_to_unicode()
UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase_ , encoding='utf-8' ) as merges_handle:
UpperCAmelCase = merges_handle.read().split('\n' )[1:-1]
UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
UpperCAmelCase = {}
UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCAmelCase__ ( self :Any ) -> List[str]:
return len(self.encoder )
def UpperCAmelCase__ ( self :str ) -> Tuple:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Dict ) -> List[str]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase = tuple(lowercase_ )
UpperCAmelCase = get_pairs(lowercase_ )
if not pairs:
return token
while True:
UpperCAmelCase = min(lowercase_ , key=lambda lowercase_ : self.bpe_ranks.get(lowercase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase , UpperCAmelCase = bigram
UpperCAmelCase = []
UpperCAmelCase = 0
while i < len(lowercase_ ):
try:
UpperCAmelCase = word.index(lowercase_ , lowercase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase = j
if word[i] == first and i < len(lowercase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase = tuple(lowercase_ )
UpperCAmelCase = new_word
if len(lowercase_ ) == 1:
break
else:
UpperCAmelCase = get_pairs(lowercase_ )
UpperCAmelCase = ' '.join(lowercase_ )
UpperCAmelCase = word
return word
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :List[Any] ) -> Tuple:
UpperCAmelCase = []
for token in re.findall(self.pat , lowercase_ ):
UpperCAmelCase = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase_ ).split(' ' ) )
return bpe_tokens
def UpperCAmelCase__ ( self :Tuple , lowercase_ :Dict ) -> Dict:
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self :str , lowercase_ :str ) -> Any:
return self.decoder.get(lowercase_ )
def UpperCAmelCase__ ( self :Any , lowercase_ :Any ) -> Union[str, Any]:
UpperCAmelCase = ''.join(lowercase_ )
UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowercase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + '\n' )
UpperCAmelCase = 0
with open(lowercase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
UpperCAmelCase = token_index
writer.write(' '.join(lowercase_ ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None , lowercase_ :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self :Any , lowercase_ :List[str] , lowercase_ :Optional[Any]=False , **lowercase_ :List[Any] ) -> Any:
UpperCAmelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowercase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase = ' ' + text
return (text, kwargs)
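
# Usage sketch (hedged: assumes the published `facebook/bart-base` checkpoint,
# which provides the vocab.json/merges.txt files mapped above; the printed
# tokens are roughly what the byte-level BPE produces):
#
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     ids = tokenizer("Hello world")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))  # e.g. ['<s>', 'Hello', 'Ġworld', '</s>']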
| 181
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Optional[int]=0 , **lowercase_ :Optional[Any] ) -> List[str]:
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('num_inference_steps' , lowercase_ )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase , UpperCAmelCase = sample, sample
for t in range(lowercase_ , time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict=0 , **lowercase_ :Optional[int] ) -> List[str]:
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('num_inference_steps' , lowercase_ )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase = new_scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int]=None , **lowercase_ :str ) -> Dict:
if scheduler is None:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = model(lowercase_ , lowercase_ )
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Tuple:
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('num_inference_steps' , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , 'set_timesteps' ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , 'set_timesteps' ):
UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase = scheduler.timesteps[5]
UpperCAmelCase = scheduler.timesteps[6]
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase = self.full_loop(scheduler=lowercase_ )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase = self.full_loop(scheduler=lowercase_ )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]:
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCAmelCase__ ( self :int ) -> str:
self.check_over_configs(thresholding=lowercase_ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , solver_order=lowercase_ , solver_type=lowercase_ , )
def UpperCAmelCase__ ( self :Any ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCAmelCase__ ( self :str ) -> Optional[Any]:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase_ , solver_type=lowercase_ , prediction_type=lowercase_ , )
UpperCAmelCase = self.full_loop(
solver_order=lowercase_ , solver_type=lowercase_ , prediction_type=lowercase_ , )
assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self :Dict ) -> List[Any]:
self.check_over_configs(lower_order_final=lowercase_ )
self.check_over_configs(lower_order_final=lowercase_ )
def UpperCAmelCase__ ( self :Any ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowercase_ , time_step=0 )
def UpperCAmelCase__ ( self :int ) -> Union[str, Any]:
UpperCAmelCase = self.full_loop()
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def UpperCAmelCase__ ( self :Optional[int] ) -> str:
UpperCAmelCase = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def UpperCAmelCase__ ( self :Tuple ) -> Tuple:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(thresholding=lowercase_ , dynamic_thresholding_ratio=0 )
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = model(lowercase_ , lowercase_ )
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self :List[Any] , **lowercase_ :Optional[Any] ) -> Dict:
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase = scheduler_class(**lowercase_ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
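# A minimal sketch of the config round-trip the tests above rely on: diffusers
# schedulers that share a config schema can be rebuilt from one another's
# config via `from_config`. Only the class names are taken from the snippet;
# the rest is illustrative.
from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler
base = UniPCMultistepScheduler(num_train_timesteps=1000)
swapped = DPMSolverMultistepScheduler.from_config(base.config)  # reuses the shared schedule settings
restored = UniPCMultistepScheduler.from_config(swapped.config)  # back to UniPC with the same schedule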
| 181
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
a__ : str = XLMTokenizer
a__ : Optional[Any] = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
snake_case : Any = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
snake_case : int = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(_SCREAMING_SNAKE_CASE ) )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : str = "lower newer"
snake_case : List[Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = XLMTokenizer(self.vocab_file , self.merges_file )
snake_case : Dict = "lower"
snake_case : List[Any] = ["low", "er</w>"]
snake_case : Union[str, Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case : int = tokens + ["<unk>"]
snake_case : Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
snake_case : int = tokenizer.encode("sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE )
snake_case : List[str] = tokenizer.encode("multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
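# XLM wraps sequences with special tokens <s> (id 0 in this checkpoint) and
# </s> (id 1), so a single sequence becomes <s> A </s> and a pair becomes
# <s> A </s> B </s> -- exactly what the two asserts above check. A hedged
# sketch of the same property, assuming the checkpoint can be downloaded:
tok = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
ids = tok.encode("sequence builders", add_special_tokens=False)
assert tok.build_inputs_with_special_tokens(ids) == [0] + ids + [1]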
| 148
|
"""simple docstring"""
def _lowercase ( ) -> int:
return 1
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__snake_case )
def _lowercase ( __snake_case = 200 ) -> int:
return two_pound(__snake_case )
if __name__ == "__main__":
print(solution(int(input().strip())))
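# The chained recursion above counts the ways to make 200 pence from standard
# UK coins (Project Euler 31), one denomination per function. A minimal
# iterative sketch of the same count via the classic unbounded-knapsack DP;
# the coin values and the known 73682 answer are the only facts carried over,
# and the function name is illustrative:
def count_ways(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]
assert count_ways() == 73682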
| 269
| 0
|
'''simple docstring'''
import math
import os
import sys
def UpperCamelCase ( a ) -> str:
'''simple docstring'''
__magic_name__ = ''''''
try:
with open(a , '''rb''' ) as binary_file:
__magic_name__ = binary_file.read()
for dat in data:
__magic_name__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def UpperCamelCase ( a , a , a , a ) -> None:
'''simple docstring'''
lexicon.pop(a )
__magic_name__ = last_match_id
if math.loga(a ).is_integer():
for curr_key in lexicon:
__magic_name__ = '''0''' + lexicon[curr_key]
__magic_name__ = bin(a )[2:]
def UpperCamelCase ( a ) -> str:
'''simple docstring'''
__magic_name__ = {'''0''': '''0''', '''1''': '''1'''}
__magic_name__ , __magic_name__ = '''''', ''''''
__magic_name__ = len(a )
for i in range(len(a ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__magic_name__ = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(a , a , a , a )
index += 1
__magic_name__ = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__magic_name__ = lexicon[curr_string]
result += last_match_id
return result
def UpperCamelCase ( a , a ) -> str:
'''simple docstring'''
__magic_name__ = os.path.getsize(a )
__magic_name__ = bin(a )[2:]
__magic_name__ = len(a )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCamelCase ( a , a ) -> None:
'''simple docstring'''
__magic_name__ = 8
try:
with open(a , '''wb''' ) as opened_file:
__magic_name__ = [
to_write[i : i + byte_length]
for i in range(0 , len(a ) , a )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(a , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def UpperCamelCase ( a , a ) -> None:
'''simple docstring'''
__magic_name__ = read_file_binary(a )
__magic_name__ = compress_data(a )
__magic_name__ = add_file_length(a , a )
write_file_binary(a , a )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
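# The pipeline above is a dictionary coder in the LZW family: read the file as
# a bit string, greedily extend the current match until it falls out of the
# lexicon, emit the code of the longest known prefix, and widen every code by
# one bit whenever the lexicon size crosses a power of two (the
# `.is_integer()` check on the base-2 log). The original file length is
# prepended so a decompressor knows where the payload ends, and the output is
# padded to a whole number of bytes before writing.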
| 98
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
_lowerCAmelCase = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_lowerCAmelCase = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :str = """whisper"""
__SCREAMING_SNAKE_CASE :str = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE :Tuple = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Dict , a__ : Optional[int]=51865 , a__ : str=80 , a__ : List[str]=6 , a__ : List[str]=4 , a__ : List[Any]=6 , a__ : Union[str, Any]=4 , a__ : Tuple=1536 , a__ : Optional[int]=1536 , a__ : List[str]=0.0 , a__ : Union[str, Any]=0.0 , a__ : Union[str, Any]=50257 , a__ : Dict=True , a__ : Optional[Any]=True , a__ : Union[str, Any]="gelu" , a__ : Tuple=256 , a__ : Dict=0.0 , a__ : str=0.0 , a__ : Optional[Any]=0.0 , a__ : int=0.02 , a__ : Any=False , a__ : List[Any]=1500 , a__ : Optional[int]=448 , a__ : Dict=50256 , a__ : str=50256 , a__ : Tuple=50256 , a__ : List[str]=None , a__ : List[Any]=[220, 50256] , a__ : Any=False , a__ : Dict=256 , a__ : Optional[Any]=False , a__ : str=0.05 , a__ : List[Any]=10 , a__ : List[Any]=2 , a__ : Optional[int]=0.0 , a__ : List[Any]=10 , a__ : Union[str, Any]=0 , a__ : int=7 , **a__ : Any , ):
__magic_name__ = vocab_size
__magic_name__ = num_mel_bins
__magic_name__ = d_model
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = encoder_ffn_dim
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = use_cache
__magic_name__ = encoder_layers
__magic_name__ = scale_embedding # scale factor will be sqrt(d_model) if True
__magic_name__ = max_source_positions
__magic_name__ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__magic_name__ = classifier_proj_size
__magic_name__ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__magic_name__ = apply_spec_augment
__magic_name__ = mask_time_prob
__magic_name__ = mask_time_length
__magic_name__ = mask_time_min_masks
__magic_name__ = mask_feature_prob
__magic_name__ = mask_feature_length
__magic_name__ = mask_feature_min_masks
__magic_name__ = median_filter_width
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , suppress_tokens=a__ , begin_suppress_tokens=a__ , **a__ , )
class _SCREAMING_SNAKE_CASE ( __a ):
@property
def snake_case__ ( self : List[str] ):
__magic_name__ = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
__magic_name__ = {0: '''batch'''}
else:
__magic_name__ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(a__ , direction='''inputs''' )
return common_inputs
def snake_case__ ( self : Optional[int] , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 22050 , a__ : float = 5.0 , a__ : int = 220 , ):
__magic_name__ = OrderedDict()
__magic_name__ = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=a__ , framework=a__ , sampling_rate=a__ , time_duration=a__ , frequency=a__ , )
__magic_name__ = encoder_inputs['''input_features'''].shape[2]
__magic_name__ = encoder_sequence_length // 2 if self.use_past else seq_length
__magic_name__ = super().generate_dummy_inputs(
preprocessor.tokenizer , a__ , a__ , a__ , a__ )
__magic_name__ = encoder_inputs.pop('''input_features''' )
__magic_name__ = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
__magic_name__ = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def snake_case__ ( self : Dict ):
return 1E-3
| 98
| 1
|
from __future__ import annotations
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = u
for i in range(1 , lowerCamelCase__ ):
lowerCamelCase_ = temp * (u - i)
return temp
def lowerCamelCase_ ( ):
lowerCamelCase_ = int(input("enter the numbers of values: " ) )
lowerCamelCase_ = []
for _ in range(lowerCamelCase__ ):
y.append([] )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
y[i].append(lowerCamelCase__ )
lowerCamelCase_ = 0
print("enter the values of parameters in a list: " )
lowerCamelCase_ = list(map(lowerCamelCase__ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(lowerCamelCase__ ):
lowerCamelCase_ = float(input() )
lowerCamelCase_ = int(input("enter the value to interpolate: " ) )
lowerCamelCase_ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowerCamelCase__ ):
for j in range(n - i ):
lowerCamelCase_ = y[j + 1][i - 1] - y[j][i - 1]
lowerCamelCase_ = y[0][0]
for i in range(1 , lowerCamelCase__ ):
summ += (ucal(lowerCamelCase__ , lowerCamelCase__ ) * y[0][i]) / math.factorial(lowerCamelCase__ )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
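# Background for the script above: with equally spaced abscissae and
# h = x[1] - x[0], Newton's forward-difference interpolation evaluates
#     P(v) = y0 + u*Dy0 + u(u-1)/2! * D^2 y0 + u(u-1)(u-2)/3! * D^3 y0 + ...
# where u = (v - x0) / h. The ucal helper returns the falling product
# u(u-1)...(u-p+1), and the nested loops fill the forward-difference table
# D^i y_j = D^(i-1) y_(j+1) - D^(i-1) y_j column by column.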
| 19
|
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if 0 not in (x, y):
# Compare via logarithms: log10(x**y) = y * log10(x), so y * log10(x) orders the powers for positive x.
return y * math.logaa(lowerCamelCase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__A ='''Enter the base and the power separated by a comma: '''
__A, __A =map(int, input(prompt).split(''','''))
__A, __A =map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
__A =res(xa, ya)
__A =res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
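# Why the script compares logarithms rather than the powers themselves:
# x**y can be astronomically large, but log10 is monotonic, so comparing
# log10(x**y) = y * log10(x) gives the same ordering for positive bases at a
# fixed, tiny cost. The zero cases are handled separately because log10(0) is
# undefined.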
| 19
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = StableDiffusionDiffEditPipeline
_lowerCamelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
_lowerCamelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
_lowerCamelCase : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCamelCase : Union[str, Any] = frozenset([] )
def __A ( self : Any ):
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , )
A_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
A_ = DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_zero=UpperCAmelCase , )
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
A_ = CLIPTextModel(UpperCAmelCase )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A_ = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=0 ):
A_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
A_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if str(UpperCAmelCase ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase )
else:
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
A_ = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=0 ):
A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("RGB" )
if str(UpperCAmelCase ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase )
else:
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
A_ = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : Any=0 ):
A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("RGB" )
if str(UpperCAmelCase ).startswith("mps" ):
A_ = torch.manual_seed(UpperCAmelCase )
else:
A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
A_ = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def __A ( self : List[str] ):
if not hasattr(self.pipeline_class , "_optional_components" ):
return
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = pipe(**UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase )
A_ = self.pipeline_class.from_pretrained(UpperCAmelCase )
pipe_loaded.to(UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase , UpperCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = pipe_loaded(**UpperCAmelCase )[0]
A_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCAmelCase , 1E-4 )
def __A ( self : List[Any] ):
A_ = "cpu"
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_mask_inputs(UpperCAmelCase )
A_ = pipe.generate_mask(**UpperCAmelCase )
A_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
A_ = np.array([0] * 9 )
A_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __A ( self : Tuple ):
A_ = "cpu"
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inversion_inputs(UpperCAmelCase )
A_ = pipe.invert(**UpperCAmelCase ).images
A_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
A_ = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
A_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
def __A ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __A ( self : Dict ):
A_ = "cpu"
A_ = self.get_dummy_components()
A_ = {"beta_start": 0.00_085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
A_ = DPMSolverMultistepScheduler(**UpperCAmelCase )
A_ = DPMSolverMultistepInverseScheduler(**UpperCAmelCase )
A_ = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inversion_inputs(UpperCAmelCase )
A_ = pipe.invert(**UpperCAmelCase ).images
A_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
A_ = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
A_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Tuple ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __A ( cls : Optional[Any] ):
A_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
A_ = raw_image.convert("RGB" ).resize((768, 768) )
A_ = raw_image
def __A ( self : List[str] ):
A_ = torch.manual_seed(0 )
A_ = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa )
A_ = DDIMScheduler.from_config(pipe.scheduler.config )
A_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "a bowl of fruit"
A_ = "a bowl of pears"
A_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
A_ = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase ).latents
A_ = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
A_ = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __A ( self : Tuple ):
A_ = torch.manual_seed(0 )
A_ = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa )
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "a bowl of fruit"
A_ = "a bowl of pears"
A_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
A_ = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase , num_inference_steps=25 , ).latents
A_ = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
A_ = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 370
|
from math import isqrt, loga
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,__UpperCamelCase ,__UpperCamelCase ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __snake_case ( __UpperCamelCase : int = 800800 ,__UpperCamelCase : int = 800800 ):
"""simple docstring"""
A_ = degree * loga(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 329
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : List[str] = 'openai-gpt'
_SCREAMING_SNAKE_CASE : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _UpperCamelCase=40478 , _UpperCamelCase=512 , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1E-5 , _UpperCamelCase=0.02 , _UpperCamelCase="cls_index" , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=0.1 , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : str = vocab_size
_lowercase : Union[str, Any] = n_positions
_lowercase : int = n_embd
_lowercase : List[str] = n_layer
_lowercase : List[str] = n_head
_lowercase : Tuple = afn
_lowercase : Optional[Any] = resid_pdrop
_lowercase : Union[str, Any] = embd_pdrop
_lowercase : List[str] = attn_pdrop
_lowercase : Any = layer_norm_epsilon
_lowercase : int = initializer_range
_lowercase : List[Any] = summary_type
_lowercase : List[str] = summary_use_proj
_lowercase : List[Any] = summary_activation
_lowercase : List[str] = summary_first_dropout
_lowercase : str = summary_proj_to_labels
super().__init__(**_UpperCamelCase )
| 250
|
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case ) -> float:
_lowercase : Optional[Any] = 0.00
_lowercase : Dict = 0
for resistor in resistors:
if resistor <= 0:
_lowercase : Union[str, Any] = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(snake_case )
first_sum += 1 / float(snake_case )
index += 1
return 1 / first_sum
def _A ( snake_case ) -> float:
_lowercase : Dict = 0.00
_lowercase : List[str] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_lowercase : Dict = F'''Resistor at index {index} has a negative value!'''
raise ValueError(snake_case )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
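# Background for the two helpers: resistors in series simply add,
# R = R1 + R2 + ..., while in parallel the conductances add, so
# R = 1 / (1/R1 + 1/R2 + ...). A zero-valued resistor would divide by zero in
# the parallel formula, which is why that helper rejects resistor <= 0 while
# the series helper only rejects negative values.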
| 250
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __a ( unittest.TestCase ):
@slow
def snake_case_ ( self ):
_lowerCamelCase = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
_lowerCamelCase = {
'input_ids': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
_lowerCamelCase = model(a__ )['last_hidden_state']
_lowerCamelCase = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , a__ )
# compare the actual values for a slice.
_lowerCamelCase = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 80
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> list[int]:
if num <= 0:
raise ValueError('Input must be a positive integer' )
_lowerCamelCase = [True] * (num + 1)
_lowerCamelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , snake_case ):
_lowerCamelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[int] =int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
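# Note on the sieve above: marking starts at p * p because every smaller
# multiple of p has a prime factor below p and was crossed out earlier; the
# total work is the classic O(n log log n). For example,
# prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].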
| 80
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Optional[int]=18 , __SCREAMING_SNAKE_CASE : str=30 , __SCREAMING_SNAKE_CASE : List[str]=400 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=True , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size_divisor
__a = do_rescale
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Tuple = GLPNImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = GLPNImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size_divisor'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''resample'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_rescale'''))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
pass
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
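# The assertions above pin down the processor's contract: whatever the input
# type (PIL, NumPy, or torch), the output height and width are each a multiple
# of size_divisor (32 in this tester), presumably so feature maps stay aligned
# through the encoder's strided stages.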
| 49
|
from __future__ import annotations
import random
# Maximum size of the population. A larger population can converge faster but uses more memory.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. Selection runs
# from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = random.randint(0 , len(_UpperCAmelCase ) - 1 )
__a = parent_a[:random_slice] + parent_a[random_slice:]
__a = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__a = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = []
# Generate more children proportionally to the fitness score.
__a = int(parent_a[1] * 100 ) + 1
__a = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
__a = population_score[random.randint(0 , _UpperCAmelCase )][0]
__a , __a = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__a = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
__a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__a = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
__a = []
for _ in range(_UpperCAmelCase ):
population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
# Just some logs to know what the algorithm is doing.
__a , __a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
__a = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
__a = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
__a = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and, if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also compute small strings in
# far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
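# How one generation works above: every candidate string is scored by
# per-position matches against the target and the list is sorted best-first;
# a third of the population is carried over, and new children are bred by
# single-point crossover plus a chance of mutating one random gene, with
# fitter parents receiving proportionally more offspring, which is what pushes
# the population toward the target.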
| 49
| 1
|
'''simple docstring'''
def UpperCamelCase ( a , a ) -> List[Any]:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__magic_name__ = str(bin(a ) )[2:] # remove the leading "0b"
__magic_name__ = str(bin(a ) )[2:] # remove the leading "0b"
__magic_name__ = max(len(a ) , len(a ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(a ) , b_binary.zfill(a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
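# Worked example of the digit-wise AND above: for a = 25 (0b11001) and
# b = 32 (0b100000), the shorter operand is zero-filled on the left, the pairs
# are compared digit by digit, and the result is "0b000000"; for a = 25 and
# b = 27 (0b11011) it is "0b11001".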
| 353
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_lowerCAmelCase = "\\n\n"
_lowerCAmelCase = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_lowerCAmelCase = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def snake_case__ ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def snake_case__ ( self : Optional[int] , a__ : int , a__ : Dict , a__ : int = 16 , a__ : bool = True , a__ : Any=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, cuda, or gpu."
if device == "gpu":
__magic_name__ = '''cuda'''
else:
__magic_name__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__magic_name__ = AutoModelForCausalLM.from_pretrained(a__ )
__magic_name__ = model.to(a__ )
__magic_name__ = AutoTokenizer.from_pretrained(a__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__magic_name__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(a__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__magic_name__ = model.config.max_length - 1
else:
__magic_name__ = model.config.max_length
__magic_name__ = tokenizer(
a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors='''pt''' , return_attention_mask=a__ , ).to(a__ )
__magic_name__ = encodings['''input_ids''']
__magic_name__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__magic_name__ = []
__magic_name__ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(a__ ) , a__ ) ):
__magic_name__ = min(start_index + batch_size , len(a__ ) )
__magic_name__ = encoded_texts[start_index:end_index]
__magic_name__ = attn_masks[start_index:end_index]
if add_start_token:
__magic_name__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(a__ )
__magic_name__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__magic_name__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(a__ ), attn_mask] , dim=1 )
__magic_name__ = encoded_batch
with torch.no_grad():
__magic_name__ = model(a__ , attention_mask=a__ ).logits
__magic_name__ = out_logits[..., :-1, :].contiguous()
__magic_name__ = labels[..., 1:].contiguous()
__magic_name__ = attn_mask[..., 1:].contiguous()
__magic_name__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , a__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(a__ )}
| 98
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Optional[Any] = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = ["""GLPNFeatureExtractor"""]
__lowerCamelCase : int = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 52
|
def a__ ( __UpperCamelCase = 1_0_0_0 ):
SCREAMING_SNAKE_CASE_ = -1
SCREAMING_SNAKE_CASE_ = 0
for a in range(1 , n // 3 ):
# Solve the two equations a**2 + b**2 = c**2 and a + b + c = n simultaneously, eliminating c
SCREAMING_SNAKE_CASE_ = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE_ = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE_ = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE_ = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 118
| 0
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
A_ = datasets.utils.logging.get_logger(__name__)
A_ = ['''names''', '''prefix''']
A_ = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
A_ = ['''encoding_errors''', '''on_bad_lines''']
A_ = ['''date_format''']
@dataclass
class __SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
snake_case_ = ","
snake_case_ = None
snake_case_ = "infer"
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = True
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = False
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = True
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = None
snake_case_ = "."
snake_case_ = None
snake_case_ = '"'
snake_case_ = 0
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = True
snake_case_ = True
snake_case_ = 0
snake_case_ = True
snake_case_ = False
snake_case_ = None
snake_case_ = 10000
snake_case_ = None
snake_case_ = "strict"
snake_case_ = "error"
snake_case_ = None
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.delimiter is not None:
A__ : Tuple = self.delimiter
if self.column_names is not None:
A__ : str = self.column_names
@property
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Optional[Any] = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value;
# others are deprecated, so we also skip them when they are left at the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , snake_case ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
snake_case_ = CsvConfig
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
A__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case , (str, list, tuple) ):
A__ : Optional[Any] = data_files
if isinstance(snake_case , snake_case ):
A__ : List[str] = [files]
A__ : Union[str, Any] = [dl_manager.iter_files(snake_case ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : str = []
for split_name, files in data_files.items():
if isinstance(snake_case , snake_case ):
A__ : List[Any] = [files]
A__ : List[str] = [dl_manager.iter_files(snake_case ) for file in files]
splits.append(datasets.SplitGenerator(name=snake_case , gen_kwargs={"""files""": files} ) )
return splits
def _UpperCamelCase ( self : List[str] , snake_case : pa.Table ):
'''simple docstring'''
if self.config.features is not None:
A__ : int = self.config.features.arrow_schema
if all(not require_storage_cast(snake_case ) for feature in self.config.features.values() ):
# cheaper cast
A__ : int = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=snake_case )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
A__ : Dict = table_cast(snake_case , snake_case )
return pa_table
def _UpperCamelCase ( self : int , snake_case : Dict ):
'''simple docstring'''
A__ : Optional[int] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
A__ : int = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(snake_case ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case ) ):
A__ : Optional[int] = pd.read_csv(snake_case , iterator=snake_case , dtype=snake_case , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(snake_case ):
A__ : Union[str, Any] = pa.Table.from_pandas(snake_case )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise
| 296
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A_ = '''src/diffusers'''
A_ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
A_ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
A_ = spec.loader.load_module()
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[Any] ) ->Any:
return line.startswith(UpperCAmelCase__ ) or len(UpperCAmelCase__ ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""", UpperCAmelCase__ ) is not None
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
A__ : Any = object_name.split(""".""" )
A__ : int = 0
# First let's find the module where our object lives.
A__ : str = parts[i]
while i < len(UpperCAmelCase__ ) and not os.path.isfile(os.path.join(UpperCAmelCase__, f'{module}.py' ) ):
i += 1
if i < len(UpperCAmelCase__ ):
A__ : Union[str, Any] = os.path.join(UpperCAmelCase__, parts[i] )
if i >= len(UpperCAmelCase__ ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(UpperCAmelCase__, f'{module}.py' ), """r""", encoding="""utf-8""", newline="""\n""" ) as f:
A__ : List[Any] = f.readlines()
# Now let's find the class / func in the code!
A__ : Optional[Any] = """"""
A__ : Any = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase__ ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase__ ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A__ : List[Any] = line_index
while line_index < len(UpperCAmelCase__ ) and _should_continue(lines[line_index], UpperCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A__ : List[Any] = lines[start_index:line_index]
return "".join(UpperCAmelCase__ )
A_ = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
A_ = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
A_ = re.compile(r'''<FILL\s+[^>]*>''')
def _lowerCAmelCase ( UpperCAmelCase__ : List[str] ) ->Optional[Any]:
A__ : Dict = code.split("""\n""" )
A__ : List[Any] = 0
while idx < len(UpperCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase__ ):
return re.search(R"""^(\s*)\S""", lines[idx] ).groups()[0]
return ""
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->int:
A__ : str = len(get_indent(UpperCAmelCase__ ) ) > 0
if has_indent:
A__ : Union[str, Any] = f'class Bla:\n{code}'
A__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=1_1_9, preview=UpperCAmelCase__ )
A__ : Tuple = black.format_str(UpperCAmelCase__, mode=UpperCAmelCase__ )
A__ , A__ : List[Any] = style_docstrings_in_code(UpperCAmelCase__ )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def _lowerCAmelCase ( UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict=False ) ->List[Any]:
with open(UpperCAmelCase__, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
A__ : int = f.readlines()
A__ : Dict = []
A__ : List[str] = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase__ ):
A__ : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A__ , A__ , A__ : Dict = search.groups()
A__ : Tuple = find_code_in_diffusers(UpperCAmelCase__ )
A__ : int = get_indent(UpperCAmelCase__ )
A__ : List[str] = line_index + 1 if indent == theoretical_indent else line_index + 2
A__ : Tuple = theoretical_indent
A__ : Optional[Any] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A__ : Tuple = True
while line_index < len(UpperCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase__ ):
break
A__ : Optional[int] = lines[line_index]
A__ : Tuple = _should_continue(UpperCAmelCase__, UpperCAmelCase__ ) and re.search(f'^{indent}# End copy', UpperCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A__ : Dict = lines[start_index:line_index]
A__ : Tuple = """""".join(UpperCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
A__ : Optional[int] = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase__ ) is None]
A__ : Optional[Any] = """\n""".join(UpperCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase__ ) > 0:
A__ : int = replace_pattern.replace("""with""", """""" ).split(""",""" )
A__ : List[Any] = [_re_replace_pattern.search(UpperCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A__ , A__ , A__ : Union[str, Any] = pattern.groups()
A__ : Union[str, Any] = re.sub(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if option.strip() == "all-casing":
A__ : List[Any] = re.sub(obja.lower(), obja.lower(), UpperCAmelCase__ )
A__ : Tuple = re.sub(obja.upper(), obja.upper(), UpperCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A__ : Optional[int] = blackify(lines[start_index - 1] + theoretical_code )
A__ : List[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A__ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
A__ : Tuple = start_index + 1
if overwrite and len(UpperCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(UpperCAmelCase__, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
return diffs
def _lowerCAmelCase ( UpperCAmelCase__ : bool = False ) ->Any:
A__ : Dict = glob.glob(os.path.join(UpperCAmelCase__, """**/*.py""" ), recursive=UpperCAmelCase__ )
A__ : str = []
for filename in all_files:
A__ : Any = is_copy_consistent(UpperCAmelCase__, UpperCAmelCase__ )
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(UpperCAmelCase__ ) > 0:
A__ : Any = """\n""".join(UpperCAmelCase__ )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
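# Illustrative example (assumed object path, not taken from this repo): the kind of comment
# this script validates. Somewhere in a diffusers module one might write:
#
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.scale_model_input
#     def scale_model_input(self, sample, timestep):
#         ...
#
# `is_copy_consistent` re-fetches the referenced object with `find_code_in_diffusers`,
# applies any `with A->B` replacement patterns, re-formats the result with black, and diffs
# it against the code that follows the comment.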
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = prime_factors(__lowerCAmelCase )
if is_square_free(__lowerCAmelCase ):
return -1 if len(__lowerCAmelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
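# Quick sanity check (assuming `prime_factors` returns the factorization as a list with
# multiplicity and `is_square_free` accepts that list):
#
#     mobius(24)  # 24 = 2^3 * 3 is not square-free -> 0
#     mobius(10)  # 10 = 2 * 5, two prime factors   -> 1
#     mobius(7)   # one prime factor                -> -1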
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
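# How the lazy module behaves (sketch): `_LazyModule` defers the actual submodule imports
# until a name is first accessed, so e.g.
#
#     from transformers import PLBartConfig   # cheap: does not import torch
#     from transformers import PLBartModel    # pulls in modeling_plbart (requires torch)
#
# Missing optional dependencies simply leave the corresponding names out of
# `_import_structure` instead of raising at import time.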
'''simple docstring'''

import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database.

        Caller is responsible for opening and closing the SQL connection.
        """
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
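# Minimal usage sketch (assumed entry points; these classes are the machinery behind
# `Dataset.to_sql` / `Dataset.from_sql` in `datasets`):
#
#     import sqlite3
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
#     con = sqlite3.connect("data.db")
#     ds.to_sql("my_table", con)  # uses SqlDatasetWriter under the hood
#     ds2 = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///data.db")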
'''simple docstring'''

import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serializes this instance, replacing `GenerationConfig` values by dictionaries for JSON support."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
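# Usage sketch: these arguments extend `TrainingArguments` for seq2seq evaluation with
# generation (values below are illustrative):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )
#
# `to_dict` additionally flattens a `GenerationConfig` value so the arguments stay
# JSON-serializable for logging.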
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
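# Example invocation (script name and paths are placeholders):
#
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path bigbird_model/model.ckpt \
#         --big_bird_config_file bigbird_model/config.json \
#         --pytorch_dump_path converted_model \
#         --is_trivia_qa
#
# The resulting directory can then be loaded with
# `BigBirdForQuestionAnswering.from_pretrained("converted_model")`.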
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer  # the exit layer raised by the model
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
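# Inference-time early exit (sketch): `DeeBertModel` raises `HighwayException` once a
# highway classifier's entropy drops below a configured threshold, and the except-branch
# above converts that into the final output plus the exit layer index:
#
#     model.roberta.encoder.set_early_exit_entropy(0.5)  # per-highway entropy threshold
#     outputs = model(input_ids)  # may exit before the last layer
#
# `set_early_exit_entropy` lives on the DeeBERT encoder in this research project; if the
# name differs in your checkout, treat the two lines above as pseudocode.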
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
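# Example launch (paths are placeholders; the task must be one of the keys registered in
# `processors` from utils_multiple_choice, e.g. "swag"):
#
#     python run_multiple_choice.py \
#         --task_name swag \
#         --model_name_or_path roberta-base \
#         --data_dir ./swag_data \
#         --output_dir ./swag_out \
#         --max_seq_length 80 \
#         --do_train --do_eval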
def text_justification(word: str, max_width: int) -> list:
    """
    Format the string so that each line has exactly `max_width` characters and is fully
    (left and right) justified, and return the list of justified lines.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
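# Worked example:
#
#     text_justification("This is an example of text justification.", 16)
#     # -> ['This    is    an', 'example  of text', 'justification.  ']
#
# Each line is exactly 16 characters: extra spaces are distributed round-robin starting
# from the left gaps, and the final line is left-justified and padded with trailing spaces.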
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> Optional[int]:
'''simple docstring'''
a__ : str =size if size is not None else {"shortest_edge": 2_0}
a__ : Union[str, Any] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Optional[int] =batch_size
a__ : Any =num_channels
a__ : List[str] =image_size
a__ : Dict =min_resolution
a__ : List[Any] =max_resolution
a__ : Dict =do_resize
a__ : Union[str, Any] =size
a__ : str =do_center_crop
a__ : List[str] =crop_size
def _lowercase ( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =MobileNetVaImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "crop_size" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Any =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : List[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Dict =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : str =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
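# Examples:
#
#     bin_to_octal("1111")              # '17'   (padded to '001111' -> groups 001, 111)
#     bin_to_octal("101010101010011")   # '52523'
#
# Empty or non-binary input raises ValueError.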
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402


SAMPLE_ROBERTA_CONFIG = get_tests_dir('fixtures/dummy-config.json')


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto'))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained('bert-base-uncased')
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model('roberta')
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, 'fake-roberta')
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, 'config.json'), 'w') as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('model', CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('bert', BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoConfig.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.',
        ):
            _ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo')

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=False)

        config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, 'NewModelConfig')

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, 'NewModelConfig')

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register('new-model', NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model')
            self.assertEqual(config.__class__.__name__, 'NewModelConfigLocal')
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, 'NewModelConfigLocal')
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, 'NewModelConfig')
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
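# Behavior sketch for the `_pad` override above: LED adds a `global_attention_mask` input,
# and padding must extend it with `-1` ("local attention") rather than `0`:
#
#     tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tok("long document")
#     enc["global_attention_mask"] = [0] * len(enc["input_ids"])
#     padded = tok.pad(enc, padding="max_length", max_length=16)
#     # padded["global_attention_mask"] now ends with -1s, not 0s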
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
| 313
| 1
|
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__A = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class _snake_case ( lowerCamelCase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : Tuple=1 ):
__lowerCamelCase : Dict = tokenizer
__lowerCamelCase : Tuple = dataset
__lowerCamelCase : str = len(UpperCAmelCase ) if n_tasks is None else n_tasks
__lowerCamelCase : Optional[int] = n_copies
def __iter__( self : Dict ):
__lowerCamelCase : List[str] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
__lowerCamelCase : Tuple = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _snake_case ( lowerCamelCase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : int ):
__lowerCamelCase : Optional[int] = start_length
__lowerCamelCase : List[Any] = eof_strings
__lowerCamelCase : int = tokenizer
def __call__( self : str , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ):
__lowerCamelCase : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
__lowerCamelCase : List[str] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCAmelCase )
def remove_last_block ( code: str ) -> str:
    '''simple docstring'''
    string_list = re.split("(%s)" % "|".join(__A ) , code )
    # last string should be ""
    return "".join(string_list[:-2] )
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: Any , _lowerCamelCase: Dict , _lowerCamelCase: Optional[int] , _lowerCamelCase: Optional[int] , _lowerCamelCase: Optional[int]=20 , **_lowerCamelCase: int ) -> Any:
'''simple docstring'''
__lowerCamelCase : str = defaultdict(__snake_case ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__snake_case ) ):
with torch.no_grad():
__lowerCamelCase : str = batch["ids"].shape[-1]
__lowerCamelCase : List[str] = accelerator.unwrap_model(__snake_case ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__snake_case , **__snake_case )
# each task is generated batch_size times
__lowerCamelCase : List[str] = batch["task_id"].repeat(__snake_case )
__lowerCamelCase : List[str] = accelerator.pad_across_processes(
__snake_case , dim=1 , pad_index=tokenizer.pad_token_id )
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
__lowerCamelCase : Dict = generated_tokens.cpu().numpy()
__lowerCamelCase : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__snake_case , __snake_case ):
gen_token_dict[task].append(__snake_case )
__lowerCamelCase : List[Any] = [[] for _ in range(__snake_case )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
__lowerCamelCase : Dict = tokenizer.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
code_gens[task].append(remove_last_block(__snake_case ) )
return code_gens
def lowercase_ ( ) -> Any:
'''simple docstring'''
__lowerCamelCase : List[Any] = HfArgumentParser(__snake_case )
__lowerCamelCase : int = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__lowerCamelCase : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__lowerCamelCase : int = "false"
if args.num_workers is None:
__lowerCamelCase : Optional[int] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__lowerCamelCase : List[str] = Accelerator()
set_seed(args.seed , device_specific=__snake_case )
# Load model and tokenizer
__lowerCamelCase : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
__lowerCamelCase : Union[str, Any] = tokenizer.eos_token
__lowerCamelCase : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__lowerCamelCase : Optional[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ),
}
# Load evaluation dataset and metric
__lowerCamelCase : int = load_dataset("openai_humaneval" )
__lowerCamelCase : Dict = load_metric("code_eval" )
__lowerCamelCase : Union[str, Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
__lowerCamelCase : List[str] = args.n_samples // args.batch_size
__lowerCamelCase : Dict = TokenizedDataset(__snake_case , human_eval["test"] , n_copies=__snake_case , n_tasks=__snake_case )
# do not confuse args.batch_size, which is actually the num_return_sequences
__lowerCamelCase : Union[str, Any] = DataLoader(__snake_case , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__lowerCamelCase : Optional[int] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = accelerator.prepare(__snake_case , __snake_case )
__lowerCamelCase : Tuple = complete_code(
__snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , )
if accelerator.is_main_process:
__lowerCamelCase : Dict = []
for task in tqdm(range(__snake_case ) ):
__lowerCamelCase : Any = human_eval["test"][task]["test"]
__lowerCamelCase : Optional[Any] = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
__lowerCamelCase , __lowerCamelCase : Dict = code_eval_metric.compute(
references=__snake_case , predictions=__snake_case , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__snake_case , __snake_case )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 356
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: Dict=False ) -> Any:
'''simple docstring'''
__lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCamelCase : str = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: int , _lowerCamelCase: List[str]=False ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCamelCase : Any = ""
else:
__lowerCamelCase : Optional[int] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase : Optional[int] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
__lowerCamelCase : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
__lowerCamelCase : str = in_proj_bias[: config.hidden_size]
__lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase : str = in_proj_bias[-config.hidden_size :]
def lowercase_ ( _lowerCamelCase: int ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : Tuple = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : List[Any] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: List[str] , _lowerCamelCase: Optional[int] ) -> Any:
'''simple docstring'''
__lowerCamelCase : str = dct.pop(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = val
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Tuple ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : int = ViTMSNConfig()
__lowerCamelCase : Dict = 1000
__lowerCamelCase : str = "datasets/huggingface/label-files"
__lowerCamelCase : Optional[int] = "imagenet-1k-id2label.json"
__lowerCamelCase : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase ) , "r" ) )
__lowerCamelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__lowerCamelCase : int = idalabel
__lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__lowerCamelCase : int = 384
__lowerCamelCase : Optional[int] = 1536
__lowerCamelCase : str = 6
elif "l16" in checkpoint_url:
__lowerCamelCase : Optional[Any] = 1024
__lowerCamelCase : str = 4096
__lowerCamelCase : Any = 24
__lowerCamelCase : Optional[int] = 16
__lowerCamelCase : Union[str, Any] = 0.1
elif "b4" in checkpoint_url:
__lowerCamelCase : Optional[Any] = 4
elif "l7" in checkpoint_url:
__lowerCamelCase : str = 7
__lowerCamelCase : int = 1024
__lowerCamelCase : int = 4096
__lowerCamelCase : Union[str, Any] = 24
__lowerCamelCase : Optional[int] = 16
__lowerCamelCase : List[Any] = 0.1
__lowerCamelCase : str = ViTMSNModel(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["target_encoder"]
__lowerCamelCase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowerCamelCase )
__lowerCamelCase : Tuple = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , base_model=_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__lowerCamelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCamelCase : Tuple = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
__lowerCamelCase : List[str] = ViTImageProcessor(
size=config.image_size , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__lowerCamelCase : Tuple = image_processor(images=_lowerCamelCase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
__lowerCamelCase : Optional[int] = model(**_lowerCamelCase )
__lowerCamelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__lowerCamelCase : Any = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
__lowerCamelCase : Optional[Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
__lowerCamelCase : List[str] = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
__lowerCamelCase : str = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
__lowerCamelCase : Optional[int] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _lowerCamelCase , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 64
| 0
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : List[str] = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : int = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1_024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1_024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
lowercase : List[Any] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowercase : Union[str, Any] = BERTEncoder(
        attention_cell=predefined_args["""attention_cell"""] ,
        num_layers=predefined_args["""num_layers"""] ,
        units=predefined_args["""units"""] ,
        hidden_size=predefined_args["""hidden_size"""] ,
        max_length=predefined_args["""max_length"""] ,
        num_heads=predefined_args["""num_heads"""] ,
        scaled=predefined_args["""scaled"""] ,
        dropout=predefined_args["""dropout"""] ,
        output_attention=SCREAMING_SNAKE_CASE__ ,
        output_all_encodings=SCREAMING_SNAKE_CASE__ ,
        use_residual=predefined_args["""use_residual"""] ,
        activation=predefined_args.get("""activation""" , """gelu""" ) ,
        layer_norm_eps=predefined_args.get("""layer_norm_eps""" , SCREAMING_SNAKE_CASE__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowercase : Any = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowercase : Optional[int] = os.path.join(get_home_dir() , """models""" )
lowercase : List[Any] = _load_vocab(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cls=SCREAMING_SNAKE_CASE__ )
lowercase : int = nlp.model.BERTModel(
        SCREAMING_SNAKE_CASE__ ,
        len(SCREAMING_SNAKE_CASE__ ) ,
        units=predefined_args["""units"""] ,
        embed_size=predefined_args["""embed_size"""] ,
        embed_dropout=predefined_args["""embed_dropout"""] ,
        word_embed=predefined_args["""word_embed"""] ,
        use_pooler=SCREAMING_SNAKE_CASE__ ,
        use_token_type_embed=SCREAMING_SNAKE_CASE__ ,
        token_type_vocab_size=predefined_args["""token_type_vocab_size"""] ,
        use_classifier=SCREAMING_SNAKE_CASE__ ,
        use_decoder=SCREAMING_SNAKE_CASE__ , )
original_bort.load_parameters(SCREAMING_SNAKE_CASE__ , cast_dtype=SCREAMING_SNAKE_CASE__ , ignore_extra=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowercase : Union[str, Any] = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(SCREAMING_SNAKE_CASE__ ),
}
lowercase : Dict = BertConfig.from_dict(SCREAMING_SNAKE_CASE__ )
lowercase : Any = BertForMaskedLM(SCREAMING_SNAKE_CASE__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(SCREAMING_SNAKE_CASE__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = hf_param.shape
lowercase : int = to_torch(params[gluon_param] )
lowercase : str = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
lowercase : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
lowercase : Dict = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
lowercase : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
lowercase : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowercase : Tuple = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowercase : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
lowercase : BertSelfAttention = layer.attention.self
lowercase : Any = check_and_map_params(
self_attn.key.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
lowercase : str = check_and_map_params(
self_attn.key.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
lowercase : Any = check_and_map_params(
self_attn.query.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
lowercase : List[str] = check_and_map_params(
self_attn.query.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
lowercase : List[str] = check_and_map_params(
self_attn.value.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
lowercase : Optional[Any] = check_and_map_params(
self_attn.value.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
lowercase : BertSelfOutput = layer.attention.output
lowercase : List[Any] = check_and_map_params(
self_output.dense.bias , f"encoder.transformer_cells.{i}.proj.bias" )
lowercase : Optional[Any] = check_and_map_params(
self_output.dense.weight , f"encoder.transformer_cells.{i}.proj.weight" )
lowercase : Union[str, Any] = check_and_map_params(
self_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.layer_norm.beta" )
lowercase : List[str] = check_and_map_params(
self_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
lowercase : BertIntermediate = layer.intermediate
lowercase : Dict = check_and_map_params(
intermediate.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
lowercase : str = check_and_map_params(
intermediate.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
lowercase : BertOutput = layer.output
lowercase : Optional[Any] = check_and_map_params(
bert_output.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
lowercase : int = check_and_map_params(
bert_output.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
lowercase : str = check_and_map_params(
bert_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
lowercase : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowercase : List[Any] = RobertaTokenizer.from_pretrained("""roberta-base""" )
lowercase : int = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ )["""input_ids"""]
# Get gluon output
lowercase : Any = mx.nd.array([input_ids] )
lowercase : List[Any] = original_bort(inputs=SCREAMING_SNAKE_CASE__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : str = BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
hf_bort_model.eval()
lowercase : Union[str, Any] = tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
lowercase : List[str] = hf_bort_model(**SCREAMING_SNAKE_CASE__ )[0]
lowercase : str = output_gluon[0].asnumpy()
lowercase : Optional[int] = output_hf[0].detach().numpy()
lowercase : Optional[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowercase : List[str] = np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase : Union[str, Any] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 20
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCAmelCase ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
UpperCAmelCase__ = TOKENIZER_CLASSES
else:
UpperCAmelCase__ = {tokenizer_name: getattr(_lowerCAmelCase , tokenizer_name + "Fast" )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
UpperCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name]
UpperCAmelCase__ = True
if checkpoint_name is None:
UpperCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
UpperCAmelCase__ = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
UpperCAmelCase__ = tokenizer_class.from_pretrained(_lowerCAmelCase , force_download=_lowerCAmelCase )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCAmelCase__ , UpperCAmelCase__ = checkpoint.split("/" )
UpperCAmelCase__ = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
elif add_prefix:
UpperCAmelCase__ = checkpoint
UpperCAmelCase__ = dump_path
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
UpperCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
UpperCAmelCase__ = file_path.split(_lowerCAmelCase )[-1][0]
if next_char == "/":
UpperCAmelCase__ = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
UpperCAmelCase__ = tokenizer.save_pretrained(
_lowerCAmelCase , legacy_format=_lowerCAmelCase , filename_prefix=_lowerCAmelCase )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(_lowerCAmelCase )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
_lowerCAmelCase : str = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 169
| 0
|
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : str = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class __lowerCAmelCase ( __a ):
@add_start_docstrings(lowerCAmelCase__ )
def __call__(self , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class __lowerCAmelCase ( __a ):
    def __init__(self , max_length , max_position_embeddings = None ):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
@add_start_docstrings(lowerCAmelCase__ )
def __call__(self , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
_UpperCAmelCase : Dict = input_ids.shape[-1]
_UpperCAmelCase : Optional[Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class __lowerCAmelCase ( __a ):
    def __init__(self , start_length , max_new_tokens ):
        warnings.warn(
            """The class `MaxNewTokensCriteria` is deprecated. """
            F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            """with `max_length = start_length + max_new_tokens` instead.""" , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
@add_start_docstrings(lowerCAmelCase__ )
def __call__(self , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
return input_ids.shape[-1] >= self.max_length
class __lowerCAmelCase ( __a ):
    def __init__(self , max_time , initial_timestamp = None ):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(lowerCAmelCase__ )
def __call__(self , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
return time.time() - self.initial_timestamp > self.max_time
class __lowerCAmelCase ( __a ):
@add_start_docstrings(lowerCAmelCase__ )
def __call__(self , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
return any(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) for criteria in self )
@property
def snake_case_ (self ):
for stopping_criterium in self:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return stopping_criterium.max_length
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return stopping_criterium.max_length
return None
def __A ( stopping_criteria , max_length ):
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
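# Hedged usage sketch, assuming the original transformers names for the classes above
# (e.g. MaxLengthCriteria and StoppingCriteriaList, as referenced in the function above):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   done = criteria(input_ids, scores)  # True as soon as any single criterion fires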
| 170
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : Any = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ : List[str] = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ : Tuple = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
lowerCAmelCase_ : str = '''▁'''
class __lowerCAmelCase ( __a ):
snake_case : List[str] = VOCAB_FILES_NAMES
snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case : str = ["""input_ids""", """attention_mask"""]
snake_case : List[Any] = BarthezTokenizer
def __init__(self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , **lowerCAmelCase__ , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase : Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : List[str] = vocab_file
_UpperCAmelCase : Tuple = False if not self.vocab_file else True
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : int = [self.cls_token_id]
_UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
_UpperCAmelCase : str = [self.sep_token_id]
_UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
| 170
| 1
|
from __future__ import annotations
def UpperCAmelCase_ ( nums ):
    # maximum sum over non-adjacent elements of nums
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0  # best sum that skips the current element
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_excluding , max_including )
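# Trace for nums = [3, 2, 5]: (max_including, max_excluding) moves
# (3, 0) -> (2, 3) -> (8, 3), so the result is max(3, 8) = 8, i.e. the
# non-adjacent elements 3 and 5.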
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own arguments plus the TPU core count
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 195
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width( height , width , scale_factor=8 ):
UpperCAmelCase : Tuple = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
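# Rounding sketch: with the default scale_factor of 8 this returns the latent grid size,
# i.e. ceil(side / scale_factor**2) * scale_factor. A 768 x 768 request maps to 96 x 96
# latents (768 is divisible by 64), while 700 x 700 rounds up to 88 x 88.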
def prepare_image( pil_image , w=5_12 , h=5_12 ):
UpperCAmelCase : List[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase : Tuple = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase : List[Any] = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase : List[str] = np.transpose(UpperCAmelCase_ , [2, 0, 1] )
UpperCAmelCase : Tuple = torch.from_numpy(UpperCAmelCase_ ).unsqueeze(0 )
return image
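# prepare_image maps a PIL image to a 1 x 3 x h x w tensor scaled to [-1, 1]
# (pixel value 0 -> -1.0, pixel value 255 -> 1.0), the input range the movq encoder expects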
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : UNetaDConditionModel , lowercase_ : DDPMScheduler , lowercase_ : VQModel , ) -> str:
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase : Tuple = min(int(num_inference_steps * strength ) , lowercase_ )
UpperCAmelCase : Any = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase : Any = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
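        # e.g. num_inference_steps=100 with strength=0.3 gives init_timestep=30 and
        # t_start=70, so only the last 30 scheduler timesteps are actually run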
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Any=None ) -> Optional[Any]:
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
UpperCAmelCase : Any = image.to(device=lowercase_ , dtype=lowercase_ )
UpperCAmelCase : List[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase : List[Any] = image
else:
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
]
UpperCAmelCase : Dict = torch.cat(lowercase_ , dim=0 )
else:
UpperCAmelCase : List[str] = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
UpperCAmelCase : Optional[Any] = self.movq.config.scaling_factor * init_latents
UpperCAmelCase : int = torch.cat([init_latents] , dim=0 )
UpperCAmelCase : Union[str, Any] = init_latents.shape
UpperCAmelCase : Union[str, Any] = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
UpperCAmelCase : Union[str, Any] = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Optional[Any] = init_latents
return latents
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Union[str, Any]=0 ) -> Tuple:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase : int = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : int=0 ) -> str:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase : int = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase : Dict = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self : Optional[Any] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 100 , lowercase_ : float = 4.0 , lowercase_ : float = 0.3 , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ) -> Dict:
UpperCAmelCase : str = self._execution_device
UpperCAmelCase : int = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : int = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase : List[str] = image_embeds.shape[0]
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : str = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
if not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[Any] = [image]
if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
UpperCAmelCase : Dict = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
UpperCAmelCase : Optional[int] = image.to(dtype=image_embeds.dtype , device=lowercase_ )
UpperCAmelCase : List[str] = self.movq.encode(lowercase_ )['latents']
UpperCAmelCase : int = latents.repeat_interleave(lowercase_ , dim=0 )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase , UpperCAmelCase : Dict = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase , UpperCAmelCase : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
UpperCAmelCase : int = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : int = {'image_embeds': image_embeds}
UpperCAmelCase : Optional[int] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase : Optional[int] = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase : int = variance_pred.chunk(2 )
UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Optional[Any] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase : List[Any] = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase : int = image * 0.5 + 0.5
UpperCAmelCase : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase : str = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 280
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowercase__ = TypeVar("KEY")
lowercase__ = TypeVar("VAL")
@dataclass(frozen=_snake_case , slots=_snake_case )
class A_ ( Generic[KEY, VAL] ):
'''simple docstring'''
UpperCAmelCase_ : KEY
UpperCAmelCase_ : VAL
class A_ ( _Item ):
'''simple docstring'''
def __init__( self : Any ) -> None:
        super().__init__(None , None )
def __bool__( self : List[str] ) -> bool:
return False
lowercase__ = _DeletedItem()
class A_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
    def __init__( self : Optional[int] , initial_block_size : int = 8 , capacity_factor : float = 0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets : list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
def UpperCAmelCase_ ( self : str , lowercase_ : KEY ) -> int:
return hash(lowercase_ ) % len(self._buckets )
def UpperCAmelCase_ ( self : Any , lowercase_ : int ) -> int:
return (ind + 1) % len(self._buckets )
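    # _get_bucket_index and _get_next_ind together implement open addressing with
    # linear probing: start at hash(key) % capacity and step forward one slot at a
    # time, wrapping around the bucket array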
def UpperCAmelCase_ ( self : Dict , lowercase_ : int , lowercase_ : KEY , lowercase_ : VAL ) -> bool:
UpperCAmelCase : List[Any] = self._buckets[ind]
if not stored:
UpperCAmelCase : Dict = _Item(lowercase_ , lowercase_ )
self._len += 1
return True
elif stored.key == key:
UpperCAmelCase : Dict = _Item(lowercase_ , lowercase_ )
return True
else:
return False
def UpperCAmelCase_ ( self : Any ) -> bool:
UpperCAmelCase : List[str] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
UpperCAmelCase : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase_ ( self : Dict , lowercase_ : int ) -> None:
UpperCAmelCase : int = self._buckets
UpperCAmelCase : List[str] = [None] * new_size
UpperCAmelCase : Dict = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase_ ( self : Dict ) -> None:
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : KEY ) -> Iterator[int]:
UpperCAmelCase : Dict = self._get_bucket_index(lowercase_ )
for _ in range(len(self._buckets ) ):
yield ind
UpperCAmelCase : Union[str, Any] = self._get_next_ind(lowercase_ )
def UpperCAmelCase_ ( self : Dict , lowercase_ : KEY , lowercase_ : VAL ) -> None:
for ind in self._iterate_buckets(lowercase_ ):
if self._try_set(lowercase_ , lowercase_ , lowercase_ ):
break
def __setitem__( self : Union[str, Any] , lowercase_ : KEY , lowercase_ : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowercase_ , lowercase_ )
def __delitem__( self : Tuple , lowercase_ : KEY ) -> None:
for ind in self._iterate_buckets(lowercase_ ):
UpperCAmelCase : int = self._buckets[ind]
if item is None:
raise KeyError(lowercase_ )
if item is _deleted:
continue
if item.key == key:
UpperCAmelCase : Union[str, Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Dict , lowercase_ : KEY ) -> VAL:
for ind in self._iterate_buckets(lowercase_ ):
UpperCAmelCase : List[str] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase_ )
def __len__( self : Optional[Any] ) -> int:
return self._len
def __iter__( self : int ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ) -> str:
UpperCAmelCase : int = ' ,'.join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 280
| 1
|
def excel_title_to_column(column_title: str) -> int:
    """
    Given a string that represents a column title in an Excel sheet,
    return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
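# Worked example beyond the doctests above: "ZY" is column 701 because,
# scanning from the right, Y contributes 25 * 26**0 and Z contributes 26 * 26**1.
assert excel_title_to_column("ZY") == 25 * 26**0 + 26 * 26**1 == 701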
| 14
|
class Graph:
    def __init__(self):
        # dictionary holding the adjacency list of each vertex
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing the visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function for every unvisited component
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
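# An equivalent, hedged sketch of the traversal with an explicit stack instead
# of recursion (the helper name is ours; it is not part of the class above).
# It starts from vertex 0, so it covers one component, unlike dfs().
def dfs_iterative(graph: Graph) -> list:
    visited, order, stack = set(), [], [0]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours in reverse so they pop in insertion order
        for neighbour in reversed(graph.vertex.get(node, [])):
            if neighbour not in visited:
                stack.append(neighbour)
    return order


if __name__ == "__main__":
    print("iterative DFS:", dfs_iterative(g))  # [0, 1, 2, 3]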
| 271
| 0
|
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if the two integers have different signs.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
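# Why the XOR trick works: the sign of an integer lives in its (conceptually
# infinite, two's-complement) top bits, so num1 ^ num2 is negative exactly
# when the two sign bits differ. Python ints are unbounded but follow the
# same rule.
assert different_signs(-7, 3) is True
assert different_signs(-7, -3) is False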
| 345
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            # the fused timm qkv projection is split into separate q/k/v weights
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
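# Example invocation (the script file name and output folder are hypothetical):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-converted
#
# The script downloads the timm checkpoint, remaps its state dict onto the
# Hugging Face Swin layout, checks logits parity on a COCO image, and saves
# the converted model together with its image processor.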
| 345
| 1
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
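# Minimal usage sketch for the filesystems above: build a tiny gzip archive in
# a temporary directory, then read its single decompressed member back.
import gzip
import tempfile

tmp_dir = tempfile.mkdtemp()
archive = os.path.join(tmp_dir, "file.txt.gz")
with gzip.open(archive, "wb") as gz:
    gz.write(b"hello compression")

fs = GzipFileSystem(fo=archive)
print(fs.cat("file.txt"))  # b'hello compression'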
| 65
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 264
| 0
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowercase__ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 352
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 161
| 0
|
"""simple docstring"""
import os
def _lowerCAmelCase ( UpperCamelCase_ = "matrix.txt" ):
with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as in_file:
__SCREAMING_SNAKE_CASE = in_file.read()
__SCREAMING_SNAKE_CASE = [[int(lowerCamelCase__ ) for cell in row.split(""",""" )] for row in data.strip().splitlines()]
__SCREAMING_SNAKE_CASE = [[0 for cell in row] for row in grid]
__SCREAMING_SNAKE_CASE = len(grid[0] )
__SCREAMING_SNAKE_CASE = [[0 for i in range(lowerCamelCase__ )] for j in range(lowerCamelCase__ )]
__SCREAMING_SNAKE_CASE = grid[0][0]
for i in range(1 , lowerCamelCase__ ):
__SCREAMING_SNAKE_CASE = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowerCamelCase__ ):
__SCREAMING_SNAKE_CASE = grid[i][0] + dp[i - 1][0]
for i in range(1 , lowerCamelCase__ ):
for j in range(1 , lowerCamelCase__ ):
__SCREAMING_SNAKE_CASE = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the image embedder used in stable
    unCLIP, and offers `scale` / `unscale` to move embeddings in and out of
    that normalized space.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
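# Minimal usage sketch: with freshly initialized parameters (zero mean, unit
# std), scale() and unscale() are exact inverses.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    roundtrip = normalizer.unscale(normalizer.scale(embeds))
    assert torch.allclose(roundtrip, embeds, atol=1e-6)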
| 130
| 0
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
__snake_case : Dict = """\
BLEURT is a learned evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
__snake_case : str = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 122
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because the model predicts both mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 122
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang,
            tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
| 53
|
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )

    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 183
| 0
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
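# Sketch of what the lazy structure above buys you (assumes a transformers
# build that ships CPM-Ant): the submodule is only imported when the attribute
# is first touched, so `import transformers` itself stays cheap.
#
#   from transformers.models.cpmant import CpmAntConfig
#   config = CpmAntConfig()  # triggers the real import of configuration_cpmant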
| 279
|
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
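    # Illustrative sanity checks (example values assumed, not part of the original file):
    assert multiplicative_persistence(39) == 3  # 39 -> 27 -> 14 -> 4
    assert additive_persistence(39) == 2  # 39 -> 12 -> 3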
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
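# Example invocation (model id and output directory are illustrative assumptions):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path CompVis/stable-diffusion-v1-4 --output_path ./sd-onnx --opset 14
# Add --fp16 on a CUDA machine to export half-precision weights.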
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
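    # `two_pointer` assumes `nums` is sorted ascending; an extra illustrative check:
    assert two_pointer([2, 7, 11, 15], 26) == [2, 3]  # 11 + 15 == 26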
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
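# To run only these tests (the file path is an assumption about the repository layout):
#   python -m pytest tests/models/dpt/test_image_processing_dpt.py -q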
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
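    # A tiny worked example (the classic healthy/fever HMM; values are illustrative, not from this file):
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # Most likely hidden-state path for the three observations:
    assert viterbi(observations, states, start_p, trans_p, emit_p) == ["Healthy", "Healthy", "Fever"]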
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
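# Example session (inputs are illustrative): entering "+1 +3 +5 +17 +19 +2 +16 +4 +0"
# inserts those keys, "-4" erases every node with value 4, and "q" quits;
# calling `inorder(root)` then prints the remaining keys in sorted order.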
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
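    # Arithmetic series: S_n = n/2 * (2a + (n - 1)d); with a=1, d=1, n=10 the sum is 55.
    assert sum_of_series(1, 1, 10) == 55.0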
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
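# Minimal usage sketch, kept as a comment because it needs TF weights downloaded at runtime
# (the checkpoint name and output shape come from the docstring constants above):
#   from transformers import AutoImageProcessor, TFRegNetModel
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")  # `image`: a PIL.Image you provide
#   outputs = model(**inputs)  # outputs.last_hidden_state has shape [1, 1088, 7, 7]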
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
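    # Liouville lambda is (-1)**Omega(n), where Omega counts prime factors with multiplicity:
    # 12 = 2*2*3 has Omega = 3 (odd) -> -1; 10 = 2*5 has Omega = 2 (even) -> 1.
    assert liouville_lambda(12) == -1
    assert liouville_lambda(10) == 1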
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
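# Minimal usage sketch (synthetic input, for illustration only):
#   import numpy as np
#   fe = WhisperFeatureExtractor()
#   audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
#   feats = fe(audio, sampling_rate=16000, return_tensors="np")
#   feats["input_features"].shape  # (1, 80, 3000): 80 mel bins x 30 s of padded frames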
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
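# Minimal usage sketch (downloads the sentencepiece model; checkpoint name from the map above):
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("A long document to summarize.").input_ids  # ends with eos_token_id == 1
#   tok.decode(ids, skip_special_tokens=True)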
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ):
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = [self.artists_encoder.get(__UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(__UpperCAmelCase ) ):
__UpperCamelCase = [self.genres_encoder.get(__UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__UpperCamelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__UpperCamelCase = [[self.lyrics_encoder.get(__UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return list(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_for_tokenization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = self._tokenize(__UpperCAmelCase )
return artist, genre, lyrics
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__UpperCamelCase = artists[idx].lower()
__UpperCamelCase = [genres[idx].lower()]
else:
__UpperCamelCase = self._normalize(artists[idx] ) + '.v2'
__UpperCamelCase = [
self._normalize(__UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__UpperCamelCase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__UpperCamelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__UpperCamelCase = {vocab[index]: index + 1 for index in range(len(__UpperCAmelCase ) )}
__UpperCamelCase = 0
__UpperCamelCase = len(__UpperCAmelCase ) + 1
__UpperCamelCase = self.vocab
__UpperCamelCase = {v: k for k, v in self.vocab.items()}
__UpperCamelCase = ''
else:
__UpperCamelCase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__UpperCamelCase = self._run_strip_accents(__UpperCAmelCase )
__UpperCamelCase = lyrics.replace('\\' , '\n' )
__UpperCamelCase = self.out_of_vocab.sub('' , __UpperCAmelCase ), [], []
return artists, genres, lyrics
    def _run_strip_accents( self , text ):
        '''simple docstring'''
        text = unicodedata.normalize('NFD' , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self , text ):
        '''simple docstring'''
        accepted = (
            [chr(i ) for i in range(ord('a' ) , ord('z' ) + 1 )]
            + [chr(i ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
            + [chr(i ) for i in range(ord('0' ) , ord('9' ) + 1 )]
            + ['.']
        )
        accepted = frozenset(accepted )
        pattern = re.compile(R'_+' )
        text = ''.join([c if c in accepted else '_' for c in text.lower()] )
        text = pattern.sub('_' , text ).strip('_' )
        return text
    def convert_lyric_tokens_to_string( self , lyrics ):
        '''simple docstring'''
        return " ".join(lyrics )
    def convert_to_tensors( self , inputs , tensor_type = None , prepend_batch_axis = False ):
        '''simple docstring'''
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                'Unable to create tensor, you should probably activate truncation and/or padding '
                'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
        return inputs
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase="pt" ):
'''simple docstring'''
__UpperCamelCase = [0, 0, 0]
__UpperCamelCase = [artist] * len(self.version )
__UpperCamelCase = [genres] * len(self.version )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.tokenize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self._convert_token_to_id(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = [-INFINITY] * len(full_tokens[-1] )
__UpperCamelCase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
        with open(artists_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
        with open(genres_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
        with open(lyrics_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token( self , artists_index , genres_index , lyric_index ):
        '''simple docstring'''
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
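# --- Added usage sketch (not part of the original file) ---
# A minimal, hedged example of driving this tokenizer end to end; the checkpoint
# name below is an assumption and the exact outputs depend on the loaded vocabularies.
#
# from transformers import JukeboxTokenizer
# tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
# encoded = tokenizer(artist="Alan Jackson", genres="Country", lyrics="I met a traveller")
# encoded["input_ids"]  # one tensor per prior, as assembled in __call__ above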
| 263
| 0
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest( unittest.TestCase ):
    @slow
    def test_for_image_classification( self ):
        '''simple docstring'''
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('nielsr/rvlcdip-demo' )
        image = dataset['train'][0]['image'].convert('RGB' )
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
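# Added note (not part of the original test file): being marked @slow, this test
# only runs when slow tests are enabled; a typical local invocation (the test
# path is a hypothetical example) is:
#   RUN_SLOW=1 pytest tests/models/dit/test_modeling_dit.py -k image_classification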
| 330
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout( TimeoutError ):
    def __init__( self , lock_file ):
        '''simple docstring'''
        self.lock_file = lock_file
        return None
    def __str__( self ):
        '''simple docstring'''
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__( self , lock ):
        '''simple docstring'''
        self.lock = lock
        return None
    def __enter__( self ):
        '''simple docstring'''
        return self.lock
    def __exit__( self , exc_type , exc_value , traceback ):
        '''simple docstring'''
        self.lock.release()
        return None
class BaseFileLock:
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self ):
        '''simple docstring'''
        return self._lock_file
    @property
    def timeout( self ):
        '''simple docstring'''
        return self._timeout
    @timeout.setter
    def timeout( self , value ):
        '''simple docstring'''
        self._timeout = float(value )
        return None
    def _acquire( self ):
        '''simple docstring'''
        raise NotImplementedError()
    def _release( self ):
        '''simple docstring'''
        raise NotImplementedError()
    @property
    def is_locked( self ):
        '''simple docstring'''
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.05 ):
        '''simple docstring'''
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release( self , force=False ):
        '''simple docstring'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}" )
        return None
    def __enter__( self ):
        '''simple docstring'''
        self.acquire()
        return self
    def __exit__( self , exc_type , exc_value , traceback ):
        '''simple docstring'''
        self.release()
        return None
    def __del__( self ):
        '''simple docstring'''
        self.release(force=True )
        return None
    def hash_filename_if_too_long( self , path , max_length ):
        '''simple docstring'''
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            filename = filename[: max_length - len(hashed_filename ) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname , filename )
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock( BaseFileLock ):
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock( BaseFileLock ):
    def _acquire( self ):
        '''simple docstring'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        '''simple docstring'''
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
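if __name__ == "__main__":
    # Added demo (not part of the original module): the `_lock_counter` makes the
    # lock re-entrant within a process, so nested `with` blocks on the same object
    # are safe; the OS-level lock is released only when the outermost block exits.
    demo_lock = FileLock("./example.txt.lock", timeout=5)
    with demo_lock:
        with demo_lock:  # re-entrant: only increments the internal counter
            assert demo_lock.is_locked
    assert not demo_lock.is_locked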
| 330
| 1
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_24,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_40,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 14_08,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_60,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 15_36,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_00,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 17_92,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_80,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 20_48,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_56,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 23_04,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_28,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 25_60,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_00,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3, 2, 0, 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2, 3, 0, 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights='''imagenet''', input_tensor=None, input_shape=None, pooling=None, classes=1_000, classifier_activation='''softmax''', )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params, tf_params, key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img(), return_tensors='''pt''' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x, axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub..." )
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
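# Added usage sketch (not part of the original script; the output path is a
# hypothetical example). This downloads the Keras ImageNet weights, ports them,
# verifies the logits to atol=1e-3, and writes the HF model and image processor:
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model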
| 354
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
class FeatureExtractorUtilTester( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class FeatureExtractorPushToHubTester( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ):
        try:
            delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
        except HTTPError:
            pass
    def test_push_to_hub( self ):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='''test-feature-extractor''' , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_in_organization( self ):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_dynamic_feature_extractor( self ):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
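# Added note (not part of the original test file): the push-to-hub tests are
# gated by the @is_staging_test decorator, which transformers keys off the
# HUGGINGFACE_CO_STAGING environment variable so they hit the staging Hub
# rather than production repositories.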
| 155
| 0
|
"""simple docstring"""
import math
def sieve(n):
    in_prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(10**6))
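# Added sanity check (not part of the original script): the segmented sieve
# should reproduce the well-known primes below 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]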
| 86
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path , pytorch_dump_folder_path ):
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=False , output_all_encodings=False , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , None ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , "models" )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(snake_case__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base" )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors="pt" )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print("✔️ Both models output the same tensors" )
    else:
        print("❌ Both models do **NOT** output the same tensors" )
    print("Absolute difference is:" , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
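# Added usage sketch (not part of the original script; both paths are
# hypothetical and the checkpoint must be the official Bort .params file):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path bort.params --pytorch_dump_folder_path bort_hf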
| 298
| 0
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """simple docstring"""
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path):
    """simple docstring"""
    with open(dataset_path , encoding='''utf_8''' ) as f:
        f_csv = csv.reader(f )
        output = []
        next(f_csv )  # skip the first line
        for line in tqdm(f_csv ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , default='''openai-gpt''' , help='''pretrained model name''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument('''--train_dataset''' , type=str , default='''''' )
    parser.add_argument('''--eval_dataset''' , type=str , default='''''' )
    parser.add_argument('''--seed''' , type=int , default=42 )
    parser.add_argument('''--num_train_epochs''' , type=int , default=3 )
    parser.add_argument('''--train_batch_size''' , type=int , default=8 )
    parser.add_argument('''--eval_batch_size''' , type=int , default=16 )
    parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=float , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' , type=int , default=1 )
    parser.add_argument(
        '''--max_steps''' , default=-1 , type=int , help=(
            '''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
        ) , )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--learning_rate''' , type=float , default=6.25E-5 )
    parser.add_argument('''--warmup_steps''' , default=0 , type=int , help='''Linear warmup over warmup_steps.''' )
    parser.add_argument('''--lr_schedule''' , type=str , default='''warmup_linear''' )
    parser.add_argument('''--weight_decay''' , type=float , default=0.01 )
    parser.add_argument('''--lm_coef''' , type=float , default=0.9 )
    parser.add_argument('''--n_valid''' , type=int , default=374 )
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    n_gpu = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['''_start_''', '''_delimiter_''', '''_classify_''']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
    logger.info('''Encoding dataset...''' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
        optimizer_grouped_parameters = [
            {
                '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                '''weight_decay''': args.weight_decay,
            },
            {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='''Training''' )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = '''Training loss: {:.2e} lr: {:.2e}'''.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , '''module''' ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='''Evaluating''' ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('''cpu''' ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
        output_eval_file = os.path.join(args.output_dir , '''eval_results.txt''' )
        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key in sorted(result.keys() ):
                logger.info(''' %s = %s''' , key , str(result[key] ) )
                writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
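# Added usage sketch (not part of the original script; the CSV paths are
# hypothetical ROCStories cloze files in the column layout read by
# load_rocstories_dataset above):
#   python run_openai_gpt.py --do_train --do_eval --output_dir out \
#       --train_dataset cloze_test_val__spring2016.csv \
#       --eval_dataset cloze_test_test__spring2016.csv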
| 352
|
from __future__ import annotations
def kmp(pattern, text) -> bool:
    """simple docstring"""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern) -> list[int]:
    """simple docstring"""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert kmp(pattern, text)
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert kmp(pattern, text)
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)
    # Test 5)
    pattern = '''aabaabaaa'''
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
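    # Added sanity check (not part of the original script): the failure array of
    # "ABABX" stores, per index, the length of the longest proper prefix that is
    # also a suffix, which is what lets kmp() skip redundant re-comparisons.
    assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]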
| 278
| 0
|
from collections import deque
class Process:
    """simple docstring"""
    def __init__( self , process_name: str , arrival_time: int , burst_time: int ):
        '''simple docstring'''
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """simple docstring"""
    def __init__( self , number_of_queues: int , time_slices: list[int] , queue: deque[Process] , current_time: int , ):
        '''simple docstring'''
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: str = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: Dict = []
for i in range(len(__lowerCamelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: int = []
for i in range(len(__lowerCamelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = []
for i in range(len(__lowerCamelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: deque[Process] ):
'''simple docstring'''
return [q.burst_time for q in queue]
def UpperCAmelCase_ ( self: Optional[int] , __lowerCamelCase: Process ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: deque[Process] ):
'''simple docstring'''
UpperCamelCase__: deque[Process] = deque() # sequence deque of finished process
while len(__lowerCamelCase ) != 0:
UpperCamelCase__: int = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__lowerCamelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCamelCase__: Optional[int] = 0
# set the process's turnaround time because it is finished
UpperCamelCase__: Optional[Any] = self.current_time - cp.arrival_time
# set the completion time
UpperCamelCase__: List[Any] = self.current_time
# add the process to queue that has finished queue
finished.append(__lowerCamelCase )
self.finish_queue.extend(__lowerCamelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: deque[Process] , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__lowerCamelCase ) ):
UpperCamelCase__: str = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__lowerCamelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCamelCase__: Optional[int] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__lowerCamelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCamelCase__: Optional[int] = 0
# set the finish time
UpperCamelCase__: Union[str, Any] = self.current_time
# update the process' turnaround time because it is finished
UpperCamelCase__: Dict = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__lowerCamelCase )
self.finish_queue.extend(__lowerCamelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
UpperCamelCase__ , UpperCamelCase__: Dict = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
A__: Any = Process('''P1''', 0, 53)
A__: Tuple = Process('''P2''', 0, 17)
A__: Tuple = Process('''P3''', 0, 68)
A__: Tuple = Process('''P4''', 0, 24)
A__: Any = 3
A__: str = [17, 25]
A__: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
A__: str = Process('''P1''', 0, 53)
A__: Union[str, Any] = Process('''P2''', 0, 17)
A__: Optional[Any] = Process('''P3''', 0, 68)
A__: str = Process('''P4''', 0, 24)
A__: Any = 3
A__: Optional[Any] = [17, 25]
A__: Any = deque([Pa, Pa, Pa, Pa])
A__: Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
A__: str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
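
# A small follow-up sketch (not in the original script): with a single queue,
# the MLFQ above degenerates to plain FCFS, which makes it easy to sanity-check.
if __name__ == "__main__":
    fcfs_queue = deque([Process("A", 0, 5), Process("B", 0, 3)])
    fcfs_only = MLFQ(number_of_queues=1, time_slices=[], queue=fcfs_queue, current_time=0)
    fcfs_only.multi_level_feedback_queue()
    # A runs for 5 ticks first, so B waits 5 ticks: [0, 5]
    print(fcfs_only.calculate_waiting_time(list(fcfs_only.finish_queue)))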
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """Resize so the shortest edge lands in ``short_edge_length``, capped at ``max_size``."""
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
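
# A quick illustrative sketch (not from the original module): rescale a box to
# raw-image coordinates, then clip it in place to a 150x120 (h, w) image.
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 300.0, 400.0]])
    scales_yx = torch.tensor([[0.5, 0.5]])  # per-image (y_scale, x_scale)
    boxes = _scale_box(boxes, scales_yx)
    _clip_box(boxes, (150, 120))
    print(boxes)  # tensor([[  5.,  10., 120., 150.]])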
class Node:
    # BST node holding a value and left/right children
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to res
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
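
# A follow-up observation (not in the original file): insert() overwrites the
# node value when val == self.val, so duplicates are silently dropped; note
# also that the truthiness test `if self.val:` misbehaves for a root value of 0.
if __name__ == "__main__":
    assert tree_sort([3, 1, 3, 2]) == [1, 2, 3]  # the second 3 disappears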
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Copy the original generator weights into the HF model, layer by layer
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
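
# Example invocation (hypothetical local paths, shown for illustration only;
# the flags are the ones defined by the argparse block above):
#
#   python convert_hifigan.py \
#       --checkpoint_path ./hifigan_generator.pt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan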
def binomial_coefficient(n, r):
    # Compute C(n, r) with Pascal's rule, keeping only one row of the triangle.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row, update right-to-left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
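
# Quick sanity check (added for illustration): the rolling-row computation
# should match Python's built-in math.comb.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252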
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights to our PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
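
# Example invocation (hypothetical checkpoint path, shown for illustration
# only; the flags are the ones defined by the argparse block above):
#
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf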
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
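
# For reference, this builder is what backs the packaged "parquet" loader in
# `datasets`, so a minimal end-to-end usage (illustrative file name) looks like:
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})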
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
import re
from filelock import FileLock
try:
import nltk
_UpperCAmelCase = True
except (ImportError, ModuleNotFoundError):
_UpperCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char (assign the result, or the sub is a no-op)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
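
# A minimal usage sketch (not part of the original module): the config
# behaves like any other PretrainedConfig subclass, so defaults can be
# overridden field by field.
if __name__ == "__main__":
    config = LukeConfig(entity_emb_size=128)
    print(config.model_type, config.entity_emb_size)  # luke 128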
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k when targets restrict the candidate set
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
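
# A minimal usage sketch (illustrative; downloads a model on the first run):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   print(unmasker("The capital of France is <mask>.", top_k=2))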
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Get the writer_batch_size that defines the maximum row group size in the parquet files."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset as Parquet to a binary file handle; the caller opens and closes the handle."""
        written = 0
        parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure with union by rank and path compression
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: collect each undirected edge once, then greedily
        # add the lightest edges that do not close a cycle
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
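
# A small usage sketch (added for illustration): build a triangle graph and
# extract its MST; the heaviest edge (1-3, weight 3) is left out.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    mst = g.kruskal()
    print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}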
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ) -> List[Any]:
lowerCamelCase = lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
lowerCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
lowerCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
lowerCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowerCamelCase = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
lowerCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ) -> List[str]:
if split_mlp_wi:
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
lowerCamelCase = (wi_a, wi_a)
else:
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
lowerCamelCase = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Whether the model is an encoder-only model.", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scalable relative attention (UMT5)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
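# Example invocation (a sketch; the script name and all paths here are assumptions,
# not values shipped with this file):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path ./converted-model \
#         --scalable_attention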
| 291
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
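# Example invocation (a sketch; the script name and local checkpoint paths below are
# assumptions):
#
#     python convert_flava_original_pytorch_to_hf.py \
#         --checkpoint_path ./flava_full.pt \
#         --codebook_path ./flava_codebook.pt \
#         --pytorch_dump_folder_path ./flava-converted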
| 363
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
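# Behavior sketch for the lazy layout above (illustrative, not part of the module):
# importing the package is cheap, and the torch-backed submodule is only imported on
# first attribute access, assuming torch is installed per the availability check.
#
#     from transformers.models import trocr
#     model_cls = trocr.TrOCRForCausalLM  # first access triggers the real import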
| 100
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 160
|
"""simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Simplest nearest-neighbour image resizing."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        # Map a destination column back to its source column.
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        # Map a destination row back to its source row.
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
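# A vectorized alternative (a sketch, not part of the original class): the per-pixel
# double loop can be replaced with precomputed integer index maps.
#
#     ys = (np.arange(dst_h) * (im.shape[0] / dst_h)).astype(int)
#     xs = (np.arange(dst_w) * (im.shape[1] / dst_w)).astype(int)
#     resized = im[ys[:, None], xs[None, :]]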
| 160
| 1
|
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly strip an increasing 'strand' from the input and merge it."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
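# How the strands form for the asserts above: [4, 3, 5, 1, 2] yields the increasing
# strand [4, 5] first, then [3], then [1, 2], which merge to [1, 2, 3, 4, 5].
# The stable merge also handles duplicates:
#
#     assert strand_sort([2, 2, 1]) == [1, 2, 2]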
| 147
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase : Any =logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class a_ ( _lowerCAmelCase ):
__A = ["pixel_values"]
def __init__( self : List[str] , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Tuple , ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Any = size if size is not None else {"shortest_edge": 256}
lowercase_ :int = get_size_dict(lowercase , default_to_square=lowercase )
lowercase_ :str = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowercase_ :List[str] = get_size_dict(lowercase , param_name="crop_size" )
lowercase_ :List[str] = do_resize
lowercase_ :Any = size
lowercase_ :Union[str, Any] = do_center_crop
lowercase_ :Union[str, Any] = crop_size
lowercase_ :Optional[Any] = resample
lowercase_ :List[str] = do_rescale
lowercase_ :List[Any] = rescale_factor
lowercase_ :Dict = offset
lowercase_ :Optional[Any] = do_normalize
lowercase_ :Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ :Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Optional[Any] , ):
"""simple docstring"""
lowercase_ :List[Any] = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" in size:
lowercase_ :int = get_resize_output_image_size(lowercase , size["shortest_edge"] , default_to_square=lowercase )
elif "height" in size and "width" in size:
lowercase_ :Union[str, Any] = (size["height"], size["width"])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : str , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : str , ):
"""simple docstring"""
lowercase_ :Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def lowercase__ ( self : List[str] , lowercase : np.ndarray , lowercase : Union[int, float] , lowercase : bool = True , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] , ):
"""simple docstring"""
lowercase_ :List[str] = image.astype(np.floataa )
if offset:
lowercase_ :List[str] = image - (scale / 2)
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : Tuple , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Dict , ):
"""simple docstring"""
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : Tuple , lowercase : ImageInput , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : bool = None , lowercase : float = None , lowercase : bool = None , lowercase : bool = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
lowercase_ :Optional[int] = to_numpy_array(lowercase )
if do_resize:
lowercase_ :Tuple = self.resize(image=lowercase , size=lowercase , resample=lowercase )
if do_center_crop:
lowercase_ :Any = self.center_crop(lowercase , size=lowercase )
if do_rescale:
lowercase_ :Optional[Any] = self.rescale(image=lowercase , scale=lowercase , offset=lowercase )
if do_normalize:
lowercase_ :Tuple = self.normalize(image=lowercase , mean=lowercase , std=lowercase )
lowercase_ :Optional[Any] = to_channel_dimension_format(lowercase , lowercase )
return image
def lowercase__ ( self : Dict , lowercase : ImageInput , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : bool = None , lowercase : float = None , lowercase : bool = None , lowercase : bool = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : ChannelDimension = ChannelDimension.FIRST , **lowercase : Optional[int] , ):
"""simple docstring"""
lowercase_ :str = do_resize if do_resize is not None else self.do_resize
lowercase_ :Optional[Any] = resample if resample is not None else self.resample
lowercase_ :Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ :Dict = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ :Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ :Dict = offset if offset is not None else self.offset
lowercase_ :Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ :int = image_mean if image_mean is not None else self.image_mean
lowercase_ :Optional[int] = image_std if image_std is not None else self.image_std
lowercase_ :int = size if size is not None else self.size
lowercase_ :Optional[int] = get_size_dict(lowercase , default_to_square=lowercase )
lowercase_ :List[Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ :List[str] = get_size_dict(lowercase , param_name="crop_size" )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
lowercase_ :List[str] = make_batched(lowercase )
lowercase_ :List[Any] = [
[
self._preprocess_image(
image=lowercase , do_resize=lowercase , size=lowercase , resample=lowercase , do_center_crop=lowercase , crop_size=lowercase , do_rescale=lowercase , rescale_factor=lowercase , offset=lowercase , do_normalize=lowercase , image_mean=lowercase , image_std=lowercase , data_format=lowercase , )
for img in video
]
for video in videos
]
lowercase_ :Optional[int] = {"pixel_values": videos}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 147
| 1
|
'''simple docstring'''
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
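# Example call (hypothetical key; each headline prints as "<n>.) <title>", with the
# actual titles depending on the live API response):
#
#     fetch_bbc_news(bbc_news_api_key="0123456789abcdef")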
| 181
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5_122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 181
| 1
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
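# Usage sketch outside the test suite (assumes the joblib-spark backend is installed
# and registered, exactly as the decorators above require):
#
#     with parallel_backend("spark"):
#         doubled = map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
#     # doubled == {"a": [2, 3], "b": [4, 5]}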
| 356
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 291
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A class representation of an undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving achievable by removing redundant edges from the network."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 69
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 26
| 0
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
lowercase_ = input("Enter integers separated by spaces: ")
lowercase_ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
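# Non-interactive example of the same entry point:
#
#     numbers = [5, 3, 1, 4, 2]
#     rec_insertion_sort(numbers, len(numbers))
#     assert numbers == [1, 2, 3, 4, 5]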
| 20
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'''
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
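# Quick sanity check of the derived properties (values follow from the defaults above):
#
#     config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#     assert config.chunk_length == 24_000      # 1.0 s at 24 kHz
#     assert config.frame_rate == 75            # ceil(24_000 / prod([8, 5, 4, 2]))
#     assert config.num_quantizers == 32        # 1_000 * 24.0 // (75 * 10)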
| 20
| 1
|
from __future__ import annotations
import time
lowerCamelCase_ = list[tuple[int, int]]
lowerCamelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = pos_x
UpperCamelCase__ = pos_y
UpperCamelCase__ = (pos_y, pos_x)
UpperCamelCase__ = goal_x
UpperCamelCase__ = goal_y
UpperCamelCase__ = parent
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [self.start]
UpperCamelCase__ = False
def UpperCAmelCase_ (self ):
while self.node_queue:
UpperCamelCase__ = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCamelCase__ = True
return self.retrace_path(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.get_successors(SCREAMING_SNAKE_CASE_ )
for node in successors:
self.node_queue.append(SCREAMING_SNAKE_CASE_ )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
for action in delta:
UpperCamelCase__ = parent.pos_x + action[1]
UpperCamelCase__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.target.pos_y , self.target.pos_x , SCREAMING_SNAKE_CASE_ ) )
return successors
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = node
UpperCamelCase__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCamelCase__ = current_node.parent
path.reverse()
return path
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = BreadthFirstSearch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = BreadthFirstSearch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = False
def UpperCAmelCase_ (self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCamelCase__ = self.fwd_bfs.node_queue.pop(0 )
UpperCamelCase__ = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCamelCase__ = True
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = current_bwd_node
UpperCamelCase__ = current_fwd_node
UpperCamelCase__ = {
self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(SCREAMING_SNAKE_CASE_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE_ )
bwd_path.pop()
bwd_path.reverse()
UpperCamelCase__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCamelCase_ = (0, 0)
lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase_ = time.time()
lowerCamelCase_ = BreadthFirstSearch(init, goal)
lowerCamelCase_ = bfs.search()
lowerCamelCase_ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
lowerCamelCase_ = time.time()
lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal)
lowerCamelCase_ = bd_bfs.search()
lowerCamelCase_ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 244
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def UpperCAmelCase_ (self ):
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ (self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = TFViTModel(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCamelCase__ = self.image_size // 2
UpperCamelCase__ = pixel_values[:, :, :image_size, :image_size]
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , interpolate_pos_encoding=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = TFViTForImageClassification(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCamelCase__ = self.image_size // 2
UpperCamelCase__ = pixel_values[:, :, :image_size, :image_size]
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , interpolate_pos_encoding=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = TFViTForImageClassification(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def UpperCAmelCase_ (self ):
UpperCamelCase__ = TFViTModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase_ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase_ (self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase_ (self ):
pass
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Layer ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ (self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""tf""" )
# forward pass
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCamelCase__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
| 244
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """
    Return the maximum sum over all contiguous subarrays (Kadane's algorithm).

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
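# Lazy import structure for the BridgeTower model: the heavy vision and torch
# submodules are only imported when first accessed, via transformers' `_LazyModule`.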
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def is_power_of_two(number: int) -> bool:
    """
    Return True if `number` is a power of two, using the `n & (n - 1)` bit trick.
    Note that 0 is (incorrectly, for the strict definition) reported as a power of two.

    >>> is_power_of_two(0)
    True
    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(6)
    False
    >>> is_power_of_two(16)
    True
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
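# Integration test for the Flax MT5 model: a cross-entropy based score is computed
# for a tiny input pair and compared against a precomputed reference (`EXPECTED_SCORE`).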
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
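# Lazy import structure for the NLLB tokenizers: the slow (sentencepiece) and fast
# (tokenizers) variants are registered only if their optional dependency is available.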
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
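# Utility script that shrinks an ONNX model by detecting duplicated initializer
# tensors, removing the copies, and pointing every consuming node at the kept one.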
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # compare two TensorProtos while ignoring their (necessarily distinct) names
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # remove the duplicated initializer and redirect its consumers to the kept one
        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers (weights) from an ONNX model and saves the
    optimized model next to the original one.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
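# Conversion script: loads a Pix2Struct T5X/Flax checkpoint, renames its parameters
# to the Hugging Face naming scheme, and saves a PyTorch model plus its processor.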
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE: the exact attribute targets of these two assignments were lost in the
        # obfuscated source; they are assumed to configure the image processor for the
        # large checkpoint variant.
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the target checkpoint is a VQA model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
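# PyTorch RegNet implementation (largely copied from the ResNet modeling code):
# conv/BN/activation building blocks, X and Y residual layers (the Y layer adds
# Squeeze-and-Excitation), stages, encoder, and task-specific heads.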
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
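# Conversion script for RobertaPreLayerNorm: downloads the original checkpoint from
# the Hub, renames the `roberta.` prefix, drops unused LayerNorm weights, and saves
# the model and tokenizer in Hugging Face format.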
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
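# Conversion script for Reformer: copies weights from a pickled trax checkpoint
# into the Hugging Face `ReformerModelWithLMHead`, block by block.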
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
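# Conversion script for (U)MT5: flattens a T5X checkpoint, maps every parameter to
# its Hugging Face state-dict key, and saves the resulting PyTorch model.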
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch.

    NOTE: the target state-dict keys below follow the standard T5 naming scheme; they
    were reconstructed from that scheme because the original assignment targets were
    lost in the obfuscated source.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
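# Image processor following the standard transformers `BaseImageProcessor` pattern:
# optional resize (shortest edge), center crop, rescale, and normalize steps.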
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the concrete model this image processor belongs to was lost in the obfuscated
# source, so the class is kept under a generic name here.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so that the shortest edge of the image matches `size["shortest_edge"]`.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
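# Lazy import structure for the NLLB-MoE model: the torch modeling classes are only
# registered and imported when PyTorch is available.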
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
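# Configuration class for the DPR (Dense Passage Retrieval) encoders and reader,
# mirroring the BERT hyperparameters plus a `projection_dim` for the output head.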
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
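# Educational implementation of Simplified DES (S-DES): table-driven permutations,
# a two-round Feistel structure with S-boxes, and key generation from a 10-bit key.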
def apply_table(inp, table):
    """Apply the given permutation/selection table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in the S-box `s` and return the result bits."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of S-DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
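# Solve a quadratic equation ax^2 + bx + c = 0 via the quadratic formula; complex
# roots are returned as complex numbers, purely real roots as floats.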
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0, as plain floats when they are real."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
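A short usage sketch (the coefficients are illustrative): the function returns plain floats when the discriminant is non-negative and complex numbers otherwise.

print(quadratic_roots(a=1, b=-2, c=1))  # (1.0, 1.0): repeated real root
print(quadratic_roots(a=1, b=0, c=4))   # (2j, -2j): complex conjugate pair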
| 358
|
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size Gaussian kernel centred on the middle pixel."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row and stack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray-scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
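To exercise the filter without the sample image (whose relative path may not exist), a synthetic grayscale array works just as well; a sketch:

import numpy as np

synthetic = np.tile(np.arange(64, dtype=np.uint8) * 4, (64, 1))  # illustrative 64x64 gradient
blurred = gaussian_filter(synthetic, 3, sigma=1)
print(synthetic.shape, "->", blurred.shape)  # (64, 64) -> (62, 62): valid convolution, no padding

Note that gen_gaussian_kernel does not normalise the kernel to sum exactly to one, so absolute intensities can shift slightly.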
| 211
| 0
|
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each query vector, return the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
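A small usage sketch with illustrative data:

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.1, 0.1], [1.9, 1.9]])
print(similarity_search(dataset, queries))
# [[[0.0, 0.0], 0.1414...], [[2.0, 2.0], 0.1414...]] - nearest vector and its distance
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.7071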
| 7
|
"""simple docstring"""
from math import ceil
def a__ ( snake_case__ , snake_case__ ) -> Optional[int]:
lowerCamelCase = list(range(0 , snake_case__ ) )
lowerCamelCase = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
lowerCamelCase = []
for i in device_map_blocks:
if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(snake_case__ )
# Missing blocks
lowerCamelCase = [i for i in blocks if i not in device_map_blocks]
lowerCamelCase = [i for i in device_map_blocks if i not in blocks]
if len(snake_case__ ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(snake_case__ ) )
if len(snake_case__ ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(snake_case__ ) )
if len(snake_case__ ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(snake_case__ ) )
def a__ ( snake_case__ , snake_case__ ) -> List[Any]:
lowerCamelCase = list(range(snake_case__ ) )
lowerCamelCase = int(ceil(n_layers / len(snake_case__ ) ) )
lowerCamelCase = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )]
return dict(zip(snake_case__ , snake_case__ ) )
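For example (illustrative layer and device counts), get_device_map splits layers into contiguous chunks and assert_device_map accepts the result:

device_map = get_device_map(n_layers=6, devices=[0, 1, 2])
print(device_map)  # {0: [0, 1], 1: [2, 3], 2: [4, 5]}
assert_device_map(device_map, num_blocks=6)  # passes silently

# a device_map that assigns block 1 twice would raise ValueError:
# assert_device_map({0: [0, 1], 1: [1, 2]}, num_blocks=3)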
| 291
| 0
|
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 364
|
"""Fast tokenization classes for ELECTRA."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ELECTRA tokenizer, backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with [CLS] and [SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type IDs: 0 for the first sequence (and its special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
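Typical use goes through from_pretrained; a sketch (downloads the tokenizer files on first use):

tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
encoded = tokenizer("Hello world!")
print(encoded["input_ids"])                    # ids wrapped in [CLS] ... [SEP]
print(tokenizer.decode(encoded["input_ids"]))  # roughly "[CLS] hello world! [SEP]"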
| 217
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
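This __init__ relies on _LazyModule to defer the heavy torch/TF imports until a name is first accessed. A minimal standalone sketch of the idea follows; the LazyModule class below is a hypothetical illustration, not the transformers implementation, and it assumes it is installed as a package __init__ so that the relative imports resolve.

import importlib
import types


class LazyModule(types.ModuleType):
    """Defers importing submodules until one of their names is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._name_to_module)

    def __getattr__(self, attr):
        # only called when normal attribute lookup fails, i.e. on first access
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value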
| 20
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
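Instantiating the config directly shows the defaults, including the extra visual embedding dimension; a sketch:

config = VisualBertConfig()
print(config.model_type)            # "visual_bert"
print(config.hidden_size)           # 768
print(config.visual_embedding_dim)  # 512

# overriding a default is just a keyword argument:
small = VisualBertConfig(num_hidden_layers=6, visual_embedding_dim=2048)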
| 20
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
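Outside the test harness the same models can be exercised directly; a sketch with a tiny randomly initialised config (requires flax; the sizes are illustrative):

config = ViTConfig(
    image_size=32, patch_size=4, num_channels=3, hidden_size=32,
    num_hidden_layers=2, num_attention_heads=4, intermediate_size=64,
)
model = FlaxViTModel(config)
outputs = model(np.ones((1, 3, 32, 32), dtype=np.float32))
print(outputs.last_hidden_state.shape)  # (1, 65, 32): 64 patches + the [CLS] token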
| 74
| 1
|