| code (string, len 81–54k) | code_codestyle (int64, 0–721) | style_context (string, len 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
import sys
SCREAMING_SNAKE_CASE__:List[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution( n = SCREAMING_SNAKE_CASE__ ):
    # Project Euler #8: largest product of 13 adjacent digits in the series above.
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 1_2 ):
        product = 1
        for j in range(1_3 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
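For reference, the same sliding-window scan can be written more compactly with `math.prod` (Python 3.8+); run against the 1000-digit series above with a window of 13 it should yield 23514624000, the well-known Project Euler #8 answer. A minimal sketch (the function name and `window` parameter are illustrative):
import math

def solution_prod( n = SCREAMING_SNAKE_CASE__ , window = 13 ):
    # Take the best product over every run of `window` adjacent digits.
    return max(math.prod(int(d ) for d in n[i : i + window] ) for i in range(len(n ) - window + 1 ) )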
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | 1 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number( phone ):
    # Matches prefixes 0 / 94 / +94 / 0094, then the literal 7, a carrier digit
    # (anything except 3 and 9), an optional separator, and seven digits.
    pattern = re.compile(
        R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = """0094702343221"""
    print(is_sri_lankan_phone_number(phone))
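A few illustrative checks against the pattern (hypothetical numbers chosen to exercise each branch of the regex):
assert is_sri_lankan_phone_number("+94773283048")      # +94 prefix, carrier digit 7
assert is_sri_lankan_phone_number("0718382399")        # 0 prefix, carrier digit 1
assert not is_sri_lankan_phone_number("0797878383")    # 9 is not an accepted carrier digit
assert not is_sri_lankan_phone_number("075328304")     # too few trailing digits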
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase = 16 , lowerCamelCase = 88 , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = 32 , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "geglu" , lowerCamelCase = None , ):
super().__init__()
__a = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowerCamelCase , attention_head_dim=lowerCamelCase , in_channels=lowerCamelCase , num_layers=lowerCamelCase , dropout=lowerCamelCase , norm_num_groups=lowerCamelCase , cross_attention_dim=lowerCamelCase , attention_bias=lowerCamelCase , sample_size=lowerCamelCase , num_vector_embeds=lowerCamelCase , activation_fn=lowerCamelCase , num_embeds_ada_norm=lowerCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__a = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__a = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__a = [1, 0]
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase = True , ):
__a = hidden_states
__a = []
__a = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__a = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__a = self.transformer_index_for_condition[i]
__a = self.transformers[transformer_index](
lowerCamelCase , encoder_hidden_states=lowerCamelCase , timestep=lowerCamelCase , cross_attention_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__a = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__a = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowerCamelCase )
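The forward pass above blends the two transformers' residuals before re-adding the input; a minimal standalone sketch of that mixing arithmetic (function and tensor names are illustrative, not part of the class):
import torch

def mix_residuals( input_states , encoded_a , encoded_b , mix_ratio = 0.5 ):
    # Each transformer contributes its residual (output - input); blend the two
    # residuals by mix_ratio, then add the original input back in.
    blended = (encoded_a - input_states) * mix_ratio + (encoded_b - input_states) * (1 - mix_ratio)
    return blended + input_states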
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras( size ):
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
| 67 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
SCREAMING_SNAKE_CASE__:str = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ):
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 67 | """simple docstring"""
def factorial( digit ):
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy( number ):
    # A Krishnamurthy (strong) number equals the sum of the factorials of its digits.
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 1_0 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Krishnamurthy Number or not.""")
    number = int(input("""Enter number: """).strip())
    print(
        F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
    )
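As a quick sanity check, the only base-10 Krishnamurthy numbers are 1, 2, 145, and 40585 (e.g. 1! + 4! + 5! = 1 + 24 + 120 = 145):
assert all(krishnamurthy(n ) for n in (1, 2, 145, 40585) )
assert not krishnamurthy(125 )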
| 67 | 1 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
SCREAMING_SNAKE_CASE__:str = logging.get_logger(__name__)
enable_full_determinism()
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = UNetaDModel
_snake_case : Optional[Any] = """sample"""
@property
def a__ ( self ):
__a = 4
__a = 3
__a = (32, 32)
__a = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase )
__a = torch.tensor([10] ).to(lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def a__ ( self ):
return (3, 32, 32)
@property
def a__ ( self ):
return (3, 32, 32)
def a__ ( self ):
__a = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : int = UNetaDModel
_snake_case : List[str] = """sample"""
@property
def a__ ( self ):
__a = 4
__a = 4
__a = (32, 32)
__a = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase )
__a = torch.tensor([10] ).to(lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def a__ ( self ):
return (4, 32, 32)
@property
def a__ ( self ):
return (4, 32, 32)
def a__ ( self ):
__a = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
__a = self.dummy_input
return init_dict, inputs_dict
def a__ ( self ):
__a , __a = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase )
__a = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def a__ ( self ):
__a , __a = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowerCamelCase )
model.to(lowerCamelCase )
__a = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def a__ ( self ):
# by default, model loading will use accelerate since `low_cpu_mem_usage=True`
__a , __a = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowerCamelCase )
model_accelerate.to(lowerCamelCase )
model_accelerate.eval()
__a = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
__a = noise.to(lowerCamelCase )
__a = torch.tensor([10] * noise.shape[0] ).to(lowerCamelCase )
__a = model_accelerate(lowerCamelCase , lowerCamelCase )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
__a , __a = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=lowerCamelCase , low_cpu_mem_usage=lowerCamelCase )
model_normal_load.to(lowerCamelCase )
model_normal_load.eval()
__a = model_normal_load(lowerCamelCase , lowerCamelCase )["sample"]
assert torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-3 )
def a__ ( self ):
__a = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(lowerCamelCase )
__a = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__a = noise.to(lowerCamelCase )
__a = torch.tensor([10] * noise.shape[0] ).to(lowerCamelCase )
with torch.no_grad():
__a = model(lowerCamelCase , lowerCamelCase ).sample
__a = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__a = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-3 ) )
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Dict = UNetaDModel
_snake_case : Tuple = """sample"""
@property
def a__ ( self , lowerCamelCase=(32, 32) ):
__a = 4
__a = 3
__a = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase )
__a = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def a__ ( self ):
return (3, 32, 32)
@property
def a__ ( self ):
return (3, 32, 32)
def a__ ( self ):
__a = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
__a = self.dummy_input
return init_dict, inputs_dict
@slow
def a__ ( self ):
__a , __a = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase )
__a = self.dummy_input
__a = floats_tensor((4, 3) + (256, 256) ).to(lowerCamelCase )
__a = noise
__a = model(**lowerCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def a__ ( self ):
__a = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(lowerCamelCase )
__a = 4
__a = 3
__a = (256, 256)
__a = torch.ones((batch_size, num_channels) + sizes ).to(lowerCamelCase )
__a = torch.tensor(batch_size * [1E-4] ).to(lowerCamelCase )
with torch.no_grad():
__a = model(lowerCamelCase , lowerCamelCase ).sample
__a = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__a = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-2 ) )
def a__ ( self ):
__a = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(lowerCamelCase )
__a = 4
__a = 3
__a = (32, 32)
__a = torch.ones((batch_size, num_channels) + sizes ).to(lowerCamelCase )
__a = torch.tensor(batch_size * [1E-4] ).to(lowerCamelCase )
with torch.no_grad():
__a = model(lowerCamelCase , lowerCamelCase ).sample
__a = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
__a = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-2 ) )
def a__ ( self ):
# not required for this model
pass
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
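The `_LazyModule` replacement of the module in `sys.modules` defers the heavy torch imports until an attribute is actually accessed; a simplified standalone illustration of the same idea (a sketch, not the actual transformers implementation):
import importlib
import types

class LazyModule(types.ModuleType ):
    def __init__( self , name , import_structure ):
        super().__init__(name )
        self._import_structure = import_structure
    def __getattr__( self , attr ):
        # Resolve the attribute to its submodule and import that submodule on first access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule , self.__name__ )
                return getattr(module , attr )
        raise AttributeError(F"module {self.__name__!r} has no attribute {attr!r}" )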
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__:Dict = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[Any] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , "" )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , "" )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , noise_scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
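The conversion is also a plain function call from Python (all paths below are placeholders, not shipped files):
# Hypothetical paths; point these at a real LDM checkpoint and its OmegaConf config.
convert_ldm_original("model.ckpt" , "config.yaml" , "./ldm_pipeline" )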
| 67 | 1 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCamelCase( a , a , a , a ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def _lowerCamelCase( a , a , a , a , a=True ):
model.train()
__a = model(a )
__a = F.mse_loss(a , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(a )
def _lowerCamelCase( a , a=False ):
set_seed(4_2 )
__a = RegressionModel()
__a = deepcopy(a )
__a = RegressionDataset(length=8_0 )
__a = DataLoader(a , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__a = AdamW(params=model.parameters() , lr=1E-3 )
__a = AdamW(params=ddp_model.parameters() , lr=1E-3 )
__a = LambdaLR(a , lr_lambda=lambda a : epoch**0.65 )
__a = LambdaLR(a , lr_lambda=lambda a : epoch**0.65 )
# Make a copy of `model`
if sched:
__a , __a , __a , __a = accelerator.prepare(a , a , a , a )
else:
__a , __a = accelerator.prepare(a , a )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCamelCase( a ):
# Test when on a single CPU or GPU that the context manager does nothing
__a , __a , __a = get_training_setup(a )
# Use a single batch
__a , __a = next(iter(a ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(a , a , a , a )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(a ):
step_model(a , a , a , a )
else:
# Sync grads
step_model(a , a , a , a )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(a , a , a , a )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__a = ddp_input[torch.randperm(len(a ) )]
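Outside of tests, the pattern being verified here, skipping gradient all-reduce on accumulation steps, looks roughly like this in training code (a sketch; `accelerator`, `model`, `optimizer`, and `dataloader` are assumed to be prepared already, and `accumulation_steps` is illustrative):
for step, (inputs, targets) in enumerate(dataloader ):
    if (step + 1) % accumulation_steps != 0:
        # Accumulate locally: no cross-process gradient sync inside no_sync.
        with accelerator.no_sync(model ):
            loss = F.mse_loss(model(inputs ) , targets )
            accelerator.backward(loss )
    else:
        # Gradients synchronize on this backward, then we step.
        loss = F.mse_loss(model(inputs ) , targets )
        accelerator.backward(loss )
        optimizer.step()
        optimizer.zero_grad()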
def _lowerCamelCase( a ):
# Test on distributed setup that context manager behaves properly
__a , __a , __a = get_training_setup(a )
# Use a single batch
__a , __a = next(iter(a ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(a , a , a , a )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(a ):
step_model(a , a , a , a )
else:
# Sync grads
step_model(a , a , a , a )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__a = ddp_input[torch.randperm(len(a ) )]
def _lowerCamelCase( a=False , a=False ):
__a = Accelerator(
split_batches=a , dispatch_batches=a , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__a , __a , __a = get_training_setup(a )
for iteration, batch in enumerate(a ):
__a , __a = batch.values()
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(a , a , a , a , a )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(a ):
step_model(a , a , a , a )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(a ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__a = ddp_input[torch.randperm(len(a ) )]
GradientState._reset_state()
def _lowerCamelCase( a=False , a=False ):
__a = Accelerator(
split_batches=a , dispatch_batches=a , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__a , __a , __a , __a , __a , __a , __a = get_training_setup(a , a )
for iteration, batch in enumerate(a ):
__a , __a = batch.values()
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(a , a , a , a , a )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(a )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(a ):
step_model(a , a , a , a )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
__a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(a ))
if accelerator.num_processes > 1:
check_model_parameters(a , a , a , a )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def _lowerCamelCase( ):
__a = Accelerator()
__a = RegressionDataset(length=8_0 )
__a = DataLoader(a , batch_size=1_6 )
__a = RegressionDataset(length=9_6 )
__a = DataLoader(a , batch_size=1_6 )
__a , __a = accelerator.prepare(a , a )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(a ):
assert id(accelerator.gradient_state.active_dataloader ) == id(a )
if iteration < len(a ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(a ):
assert id(accelerator.gradient_state.active_dataloader ) == id(a )
if batch_num < len(a ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _lowerCamelCase( ):
__a = Accelerator()
__a = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(a )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(a )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(a , a )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(a , a )
def _lowerCamelCase( a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
class Things :
    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu( name , value , weight ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( item , max_cost , key_func ):
    # Sort by the supplied key (e.g. value, weight, or value/weight ratio) and
    # take items greedily while they still fit under the cost budget.
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    pass
if __name__ == "__main__":
    import doctest
    doctest.testmod()
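A minimal usage sketch with made-up menu data, greedily ranking by value (any of the getters, or `Things.value_weight` for the value-to-weight ratio, can serve as the key):
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 10, 70]
menu = build_menu(food , value , weight )
chosen, total_value = greedy(menu , 60 , Things.get_value )
print(chosen , total_value )   # Pizza, Burger and Coca Cola fit the 60-unit budget: value 240.0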
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
        [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
SCREAMING_SNAKE_CASE__:List[Any] = False
SCREAMING_SNAKE_CASE__:str = False
def _lowerCamelCase( a ):
return TrainCommand(a )
class snake_case__ ( snake_case_ ):
@staticmethod
def a__ ( lowerCamelCase ):
__a = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=lowerCamelCase , required=lowerCamelCase , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=lowerCamelCase , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=lowerCamelCase , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=lowerCamelCase , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=lowerCamelCase , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=lowerCamelCase , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=lowerCamelCase , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=lowerCamelCase , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=lowerCamelCase , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=lowerCamelCase , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=lowerCamelCase , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=lowerCamelCase , default=3E-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=lowerCamelCase , default=1E-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=lowerCamelCase )
def __init__( self , lowerCamelCase ):
__a = logging.get_logger("transformers-cli/training" )
__a = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=lowerCamelCase )
__a = args.output
__a = args.column_label
__a = args.column_text
__a = args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
__a = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
__a = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__a = None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
__a = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__a = args.validation_split
__a = args.train_batch_size
__a = args.valid_batch_size
__a = args.learning_rate
__a = args.adam_epsilon
def a__ ( self ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def a__ ( self ):
raise NotImplementedError
def a__ ( self ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
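# Hedged usage sketch: the sub-command name `train` comes from add_parser above,
# while the `transformers-cli` entry point and the csv path are assumptions.
#
#   transformers-cli train --train_data ./train.csv --column_label 0 --column_text 1 \
#       --task text_classification --model bert-base-uncased --output ./trained_model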
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a ):
# Initialise PyTorch model
__a = MobileBertConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
__a = MobileBertForPreTraining(a )
# Load weights from tf checkpoint
__a = load_tf_weights_in_mobilebert(a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
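# Example invocation (the script name and all paths are hypothetical; only the
# three required flags are taken from the parser defined above):
#
#   python convert_mobilebert_checkpoint.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin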
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = """data2vec-text"""
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
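# Minimal instantiation sketch. The class names are mangled above; assuming this
# is the released `Data2VecTextConfig` (its model_type "data2vec-text" matches):
#
#   config = Data2VecTextConfig(vocab_size=30522, hidden_size=768)
#   config.model_type  # -> "data2vec-text"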
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__a = input_file.read()
__a = regexp.search(lowerCamelCase )
return match
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__a = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a = regexp.finditer(lowerCamelCase )
__a = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
__a = list(range(len(a ) ) )
__a = [v / w for v, w in zip(a , a )]
index.sort(key=lambda a : ratio[i] , reverse=a )
__a = 0
__a = [0] * len(a )
for i in index:
if weight[i] <= capacity:
__a = 1
max_value += value[i]
capacity -= weight[i]
else:
__a = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
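# Worked example (illustrative values, not from the source): with
# value=[60, 100, 120], weight=[10, 20, 30] and capacity=50 the ratios are
# [6.0, 5.0, 4.0]; the first two items are taken whole and the third
# contributes a 20/30 fraction, so the result is (240.0, [1, 1, 0.666...]).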
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
__a = [1]
for i in range(2 , a ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__a = []
__a = list(range(a ) )
# Find permutation
while factorials:
__a = factorials.pop()
__a , __a = divmod(a , a )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
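# Worked example (illustrative): for n=4, k=5 the factorial bases pop as 6, 2, 1,
# giving divmod steps (0, 5), (2, 1), (1, 0); the selected elements are
# [0, 3, 2, 1], the permutation of range(4) at zero-based lexicographic rank 5.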
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class snake_case__ :
def __init__( self ):
__a = []
__a = set()
def a__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def a__ ( self ):
return len(self.elements ) == 0
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def a__ ( self , lowerCamelCase ):
if item in self.set:
self.set.remove(lowerCamelCase )
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def a__ ( self ):
return self.elements[0][1]
def a__ ( self ):
((__a) , (__a)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
def _lowerCamelCase( a , a ):
# euclidean distance
__a = np.array(a )
__a = np.array(a )
return np.linalg.norm(a - b )
def _lowerCamelCase( a , a ):
# integer division by time variable
return consistent_heuristic(a , a ) // t
def _lowerCamelCase( a , a ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCamelCase( a , a , a , a ):
__a = g_function[start] + Wa * heuristics[i](a , a )
return ans
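# The priority above is the weighted multi-heuristic A* key, key(s, i) = g(s) + W1 * h_i(s).
# The anchor queue uses the consistent heuristic (index 0); the inadmissible queues
# (indices 1 and 2) are only expanded while their best key stays within a factor W2
# of the anchor's best key (both weights are set to 1 further below).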
def _lowerCamelCase( a , a , a ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def _lowerCamelCase( a ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase( a , a , a , a , a , a , a , a , ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def _lowerCamelCase( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
SCREAMING_SNAKE_CASE__:Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
SCREAMING_SNAKE_CASE__:str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
SCREAMING_SNAKE_CASE__:int = make_common_ground()
SCREAMING_SNAKE_CASE__:List[str] = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE__:str = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 20
SCREAMING_SNAKE_CASE__:Dict = 3 # one consistent and two other inconsistent
# start and end destination
SCREAMING_SNAKE_CASE__:Dict = (0, 0)
SCREAMING_SNAKE_CASE__:Optional[Any] = (n - 1, n - 1)
SCREAMING_SNAKE_CASE__:List[str] = 1
def _lowerCamelCase( a , a , a ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
__a = len(a )
__a = []
for i in range(len(a ) - pat_len + 1 ):
__a = True
for j in range(a ):
if s[i + j] != pattern[j]:
__a = False
break
if match_found:
position.append(a )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
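# The call above prints [4, 10, 18]: every alignment of the pattern against the
# text is checked character by character, so the worst case is O(len(s) * len(pattern)).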
| 67 | """simple docstring"""
SCREAMING_SNAKE_CASE__:Any = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCamelCase( a ):
__a = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
__a = Stack()
__a = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a ) )
elif i in operators:
# RULE 2
operator_stack.push(a )
elif i == ")":
# RULE 4
__a = operator_stack.peek()
operator_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operators[opr](a , a )
operand_stack.push(a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
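# Trace for the expression above: each ")" reduces the innermost group, so
# (4 * 2) -> 8, then (2 + 3) -> 5, then 8 * 5 -> 40, then 5 + 40 -> 45,
# matching the expected answer in the comment.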
| 67 | 1 |
"""simple docstring"""
from math import factorial, pi
def _lowerCamelCase( a , a = 3_0 ):
if not isinstance(a , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(a , a ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
__a = float(a )
__a = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(a ) )
def _lowerCamelCase( a , a = 3_0 ):
if not isinstance(a , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(a , a ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
__a = float(a )
__a = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(a ) )
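# Both helpers evaluate a truncated Maclaurin series after reducing theta modulo 2*pi:
#   sin(theta) ~= sum_{r=0}^{accuracy-1} (-1)**r * theta**(2r+1) / (2r+1)!
#   cos(theta) ~= sum_{r=0}^{accuracy-1} (-1)**r * theta**(2r)   / (2r)!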
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 67 | """simple docstring"""
from math import pi
def _lowerCamelCase( a , a ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
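# The helper computes the circular arc length L = 2 * pi * r * (angle / 360);
# the call above therefore prints 2 * pi * 10 * 0.25 ~= 15.7079.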
| 67 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
SCREAMING_SNAKE_CASE__:Any = False
class snake_case__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
__a = torch.manual_seed(0 )
__a = pipe.dual_guided(
prompt="first prompt" , image=lowerCamelCase , text_to_image_strength=0.75 , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase )
__a = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = generator.manual_seed(0 )
__a = pipe.dual_guided(
prompt="first prompt" , image=lowerCamelCase , text_to_image_strength=0.75 , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a__ ( self ):
__a = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = "cyberpunk 2077"
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
__a = torch.manual_seed(0 )
__a = pipe.dual_guided(
prompt=lowerCamelCase , image=lowerCamelCase , text_to_image_strength=0.75 , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
__a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__a = "A painting of a squirrel eating a burger "
__a = torch.manual_seed(0 )
__a = pipe.text_to_image(
prompt=lowerCamelCase , generator=lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__a = pipe.image_variation(lowerCamelCase , generator=lowerCamelCase , output_type="numpy" ).images
__a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
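# Hedged usage sketch (the processor class name is mangled above; shapes follow
# from the defaults set in __init__, i.e. a shortest-edge resize scaled by
# 256/224 followed by a 224x224 center crop):
#
#   processor = ImageProcessorClass()                     # hypothetical name
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])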
| 67 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = """trajectory_transformer"""
_snake_case : Any = ["""past_key_values"""]
_snake_case : Dict = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCamelCase=100 , lowerCamelCase=5 , lowerCamelCase=1 , lowerCamelCase=1 , lowerCamelCase=249 , lowerCamelCase=6 , lowerCamelCase=17 , lowerCamelCase=25 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=128 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0006 , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=1 , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=50256 , lowerCamelCase=50256 , **lowerCamelCase , ):
__a = vocab_size
__a = action_weight
__a = reward_weight
__a = value_weight
__a = max_position_embeddings
__a = block_size
__a = action_dim
__a = observation_dim
__a = transition_dim
__a = learning_rate
__a = n_layer
__a = n_head
__a = n_embd
__a = embd_pdrop
__a = attn_pdrop
__a = resid_pdrop
__a = initializer_range
__a = layer_norm_eps
__a = kaiming_initializer_range
__a = use_cache
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
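# The mapping assigned near the top of the class plays the role of
# PretrainedConfig's `attribute_map`: callers can read config.hidden_size,
# config.num_attention_heads and config.num_hidden_layers even though the
# values are stored as n_embd, n_head and n_layer.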
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
        __a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__:Optional[int] = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[Any] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
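# This follows the standard transformers lazy-import layout: at runtime,
# `_LazyModule` defers the torch-backed imports until an attribute is first
# accessed, while the TYPE_CHECKING branch re-exports the same names so static
# type checkers can resolve them eagerly.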
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
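# Editorial note, hedged: in the reference Hugging Face seq2seq ONNX config this
# loop appends one 4-tuple of zero tensors per layer -- typically two decoder-shaped
# (self-attention key/value) and two encoder-shaped (cross-attention key/value)
# tensors -- with shapes derived from num_attention_heads and hidden_size above.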
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
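# Illustrative sketch (follows the comments above, no new behavior): with dynamic
# axes (batch_size=-1, seq_length=-1), the dummy text collapses to
# OnnxConfig.default_fixed_batch copies of a string of default_fixed_sequence
# unk tokens (minus the tokenizer's special tokens), so ONNX export sees
# fixed-shape inputs and cannot over-specialize the exported graph.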
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__:Dict = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
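# Editorial summary of the transform order applied above, assuming the defaults
# from __init__: convert to RGB -> resize shortest edge to 224 -> center crop to
# 224x224 -> rescale by 1/255 -> normalize with OPENAI_CLIP_MEAN/STD -> pack as a
# channels-first BatchFeature under "pixel_values".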
| 67 | 1 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
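# Usage sketch (hypothetical size): with model_max_length=1024, the conversation
# builder above concatenates every turn followed by eos_token_id and keeps only
# the most recent 1024 token ids, truncating from the left.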
| 67 | 1 |
"""simple docstring"""
from manim import *
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = Rectangle(height=0.5 , width=0.5 )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a = Rectangle(height=0.25 , width=0.25 )
__a = [mem.copy() for i in range(6 )]
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
__a = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
__a = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
__a = Text("CPU" , font_size=24 )
__a = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase )
__a = [mem.copy() for i in range(4 )]
__a = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
__a = Text("GPU" , font_size=24 )
__a = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase )
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
__a = Text("Model" , font_size=24 )
__a = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase )
__a = []
__a = []
for i, rect in enumerate(lowerCamelCase ):
__a = fill.copy().set_fill(lowerCamelCase , opacity=0.8 )
target.move_to(lowerCamelCase )
model_arr.append(lowerCamelCase )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase )
self.add(*lowerCamelCase , *lowerCamelCase )
__a = [meta_mem.copy() for i in range(6 )]
__a = [meta_mem.copy() for i in range(6 )]
__a = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
__a = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
__a = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
__a = Text("Disk" , font_size=24 )
__a = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase , lowerCamelCase )
__a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase , lowerCamelCase )
__a = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase )
__a = MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase ) )
__a = Square(0.3 )
input.set_fill(lowerCamelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase , buff=0.5 )
self.play(Write(lowerCamelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase ) )
self.play(FadeOut(lowerCamelCase ) )
__a = Arrow(start=lowerCamelCase , end=lowerCamelCase , color=lowerCamelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__a = MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase , run_time=3 ) )
__a = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(lowerCamelCase ) , Circumscribe(model_arr[0] , color=lowerCamelCase , **lowerCamelCase ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase , **lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase , **lowerCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__a = AnimationGroup(
FadeOut(lowerCamelCase , run_time=0.5 ) , MoveToTarget(lowerCamelCase , run_time=0.5 ) , FadeIn(lowerCamelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__a = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase , **lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase , **lowerCamelCase ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase , **lowerCamelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase , **lowerCamelCase ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase , **lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase , **lowerCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__a = a_c
__a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase ) , FadeOut(lowerCamelCase , run_time=0.5 ) , )
__a = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase , run_time=3 ) , MoveToTarget(lowerCamelCase ) )
self.wait()
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
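# Worked example (derived from the assertion above, not an extra test case):
# hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
# -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"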
| 67 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
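# Worked example (illustrative values): find_max([2, 8, 3, 5], 0, 3) splits at
# mid=1, takes max(2, 8)=8 on the left and max(3, 5)=5 on the right, returning 8.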
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 67 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowerCamelCase( a , a , a , a ):
__a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
__a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
__a = F"{src_lang}-{tgt_lang}"
__a = F"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a , exist_ok=a )
__a = os.path.join(a , "README.md" )
print(F"Generating {path}" )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(a )
# make sure we are under the root of the project
SCREAMING_SNAKE_CASE__:List[str] = Path(__file__).resolve().parent.parent.parent
SCREAMING_SNAKE_CASE__:Dict = repo_dir / """model_cards"""
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
SCREAMING_SNAKE_CASE__:Optional[Any] = model_cards_dir / """allenai""" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | 1 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE__:str = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
SCREAMING_SNAKE_CASE__:Dict = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
SCREAMING_SNAKE_CASE__:Tuple = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def a__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = 0.0
for i, j in zip(lowerCamelCase , lowerCamelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(lowerCamelCase , lowerCamelCase ) else 0.0
__a = n_correct / len(lowerCamelCase )
return {
"accuracy": accuracy,
}
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class snake_case__ ( snake_case_ ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ):
super().__init__(*lowerCamelCase , **lowerCamelCase )
__a = {}
def a__ ( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ):
__a = super().add_tokens(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
if num_added_tokens == 0:
raise ValueError(
F"The tokenizer already contains the token {placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer." )
def a__ ( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=1 , **lowerCamelCase ):
__a = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
output.append(lowerCamelCase )
else:
__a = []
for i in range(lowerCamelCase ):
__a = placeholder_token + F"_{i}"
self.try_adding_tokens(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
output.append(lowerCamelCase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"The tokenizer already has placeholder token {token} that can get confused with"
F" {placeholder_token}. Keep placeholder tokens independent." )
__a = output
def a__ ( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 ):
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = []
for i in range(len(lowerCamelCase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowerCamelCase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
__a = self.token_map[placeholder_token]
__a = tokens[: 1 + int(len(lowerCamelCase ) * prop_tokens_to_load )]
if vector_shuffle:
__a = copy.copy(lowerCamelCase )
random.shuffle(lowerCamelCase )
__a = text.replace(lowerCamelCase , " ".join(lowerCamelCase ) )
return text
def __call__( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 , **lowerCamelCase ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowerCamelCase , vector_shuffle=lowerCamelCase , prop_tokens_to_load=lowerCamelCase ) , *lowerCamelCase , **lowerCamelCase , )
def a__ ( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 , **lowerCamelCase ):
return super().encode(
self.replace_placeholder_tokens_in_text(
lowerCamelCase , vector_shuffle=lowerCamelCase , prop_tokens_to_load=lowerCamelCase ) , *lowerCamelCase , **lowerCamelCase , )
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
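# Editorial note: the loop above samples 20 angles evenly over [0, 2*pi] and builds
# one camera per angle, each placed on a ring (origin = -4 * z) looking toward the
# scene center with a 0.7 rad field of view in both axes.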
| 67 | 1 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__:int = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__:Tuple = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__:int = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__:str = """CompVis/stable-diffusion-v1-4"""
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ):
super().__init__()
__a = StableDiffusionPipeline.from_pretrained(lowerCamelCase )
__a = StableDiffusionPipeline.from_pretrained(lowerCamelCase )
__a = StableDiffusionPipeline.from_pretrained(lowerCamelCase )
__a = StableDiffusionPipeline(
vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , requires_safety_checker=lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def a__ ( self ):
return {k: getattr(self , lowerCamelCase ) for k in self.config.keys() if not k.startswith("_" )}
def a__ ( self , lowerCamelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def a__ ( self ):
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def a__ ( self , lowerCamelCase , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 50 , lowerCamelCase = 7.5 , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = 1 , **lowerCamelCase , ):
return self.pipea(
prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , )
@torch.no_grad()
def a__ ( self , lowerCamelCase , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 50 , lowerCamelCase = 7.5 , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = 1 , **lowerCamelCase , ):
return self.pipea(
prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , )
@torch.no_grad()
def a__ ( self , lowerCamelCase , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 50 , lowerCamelCase = 7.5 , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = 1 , **lowerCamelCase , ):
return self.pipea(
prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , )
@torch.no_grad()
def a__ ( self , lowerCamelCase , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 50 , lowerCamelCase = 7.5 , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = 1 , **lowerCamelCase , ):
return self.pipea(
prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , )
@torch.no_grad()
def a__ ( self , lowerCamelCase , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 50 , lowerCamelCase = 7.5 , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = 1 , **lowerCamelCase , ):
__a = "cuda" if torch.cuda.is_available() else "cpu"
self.to(lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
__a = self.textaimg_sda_a(
prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
__a = self.textaimg_sda_a(
prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
__a = self.textaimg_sda_a(
prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
__a = self.textaimg_sda_a(
prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 67 | """simple docstring"""
def factorial(digit):
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number):
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 1_0)
        fact_sum += factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Krishnamurthy Number or not.""")
    number = int(input("""Enter number: """).strip())
    print(
        F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
    )
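    # Added sanity checks: 145 = 1! + 4! + 5! and 40585 = 4! + 0! + 5! + 8! + 5!
    # are Krishnamurthy (factorion) numbers; 22 is not.
    assert krishnamurthy(145)
    assert krishnamurthy(40585)
    assert not krishnamurthy(22)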
| 67 | 1 |
"""simple docstring"""
def apply_table(inp, table):
    """Apply the given permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """S-box lookup: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("""Enter 10 bit key: """)
    message = input("""Enter 8 bit message: """)

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("""Cipher text is:""", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("""Plain text after decrypting is:""", PT)
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_bigcode"""] = [
        """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTBigCodeForSequenceClassification""",
        """GPTBigCodeForTokenClassification""",
        """GPTBigCodeForCausalLM""",
        """GPTBigCodeModel""",
        """GPTBigCodePreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
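# Added sketch of the lazy-import pattern used above: `_LazyModule` defers the
# heavy `modeling_*` imports until an attribute is first touched, so importing
# the package stays cheap. `TinyLazyModule` below is a toy stand-in for
# illustration, not the real transformers implementation.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import the owning submodule on first access, then cache the attribute
        submodule = self._attr_to_submodule[attr]
        value = getattr(importlib.import_module(f".{submodule}", self.__name__), attr)
        setattr(self, attr, value)
        return value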
| 67 | 1 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
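    # Added examples: ties are all returned, in sorted order.
    assert mode([2, 2, 3, 3, 4]) == [2, 3]
    assert mode([]) == []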
| 67 | """simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
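    # Example invocation (added; the script name and all paths are placeholders):
    #   python convert_ldm_original.py --checkpoint_path ./ldm.ckpt \
    #       --config_path ./ldm_config.yaml --output_path ./ldm-pipeline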
| 67 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [1_4_4, 1_9_2, 2_4_0]
        config.neck_hidden_sizes = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [9_6, 1_2_0, 1_4_4]
        config.neck_hidden_sizes = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [6_4, 8_0, 9_6]
        config.neck_hidden_sizes = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_" ):
        config.image_size = 5_1_2
        config.output_stride = 1_6
        config.num_labels = 2_1
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1_0_0_0
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1 , 6 ):
        if F"layer_{i}." in name:
            name = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
    if "conv_1." in name:
        name = name.replace("conv_1." , "conv_stem." )
    if ".block." in name:
        name = name.replace(".block." , "." )
    if "exp_1x1" in name:
        name = name.replace("exp_1x1" , "expand_1x1" )
    if "red_1x1" in name:
        name = name.replace("red_1x1" , "reduce_1x1" )
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
    if ".norm." in name:
        name = name.replace(".norm." , ".normalization." )
    if ".conv." in name:
        name = name.replace(".conv." , ".convolution." )
    if ".conv_proj." in name:
        name = name.replace(".conv_proj." , ".conv_projection." )

    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )

    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}." , F".{i}." )
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )

    for i in range(2 , 5 ):
        if F".global_rep.{i}.weight" in name:
            name = name.replace(F".global_rep.{i}.weight" , ".layernorm.weight" )
        if F".global_rep.{i}.bias" in name:
            name = name.replace(F".global_rep.{i}.bias" , ".layernorm.bias" )

    if ".global_rep." in name:
        name = name.replace(".global_rep." , ".transformer." )
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4." , ".output.dense." )
    if ".transformer." in name:
        name = name.replace(".transformer." , ".transformer.layer." )
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer." , "." )
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool." , "." )
    if "seg_head." in name:
        name = name.replace("seg_head." , "segmentation_head." )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
    if "classifier.fc." in name:
        name = name.replace("classifier.fc." , "classifier." )
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name
    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(checkpoint , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 2_1, 3_2, 3_2)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
else:
assert logits.shape == (1, 1_0_0_0)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub..." )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="apple" )
        model.push_to_hub(model_name , organization="apple" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
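    # Example invocation (added; the checkpoint path is a placeholder):
    #   python convert_mobilevit.py --mobilevit_name mobilevit_s \
    #       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small --push_to_hub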
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
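# Added shape sketch: each dummy past key/value tensor built above has shape
# (batch, num_attention_heads, past_sequence_length, d_model // num_attention_heads);
# with the defaults above (16 heads, d_model=512) the per-head dimension is 32.
if __name__ == "__main__":
    import torch

    past_key = torch.zeros(2, 16, 10, 512 // 16)
    assert past_key.shape == (2, 16, 10, 32)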
| 67 | 1 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 1_0 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()

    return arr


def main():
    arr = list(range(1_0 , 0 , -1 ) )
    print("Initial List" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("Sorted List\n" )
    print(*arr )
if __name__ == "__main__":
main()
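    # Added sanity check: the parallel sort agrees with Python's built-in sort.
    assert odd_even_transposition(list(range(1_0 , 0 , -1 ) )) == sorted(range(1 , 1_1 ))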
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import numpy as np
class Cell:
    """A cell in the grid world, holding its position, parent link and f = g + h scores."""

    def __init__( self ):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__( self , cell ):
        return self.position == cell.position

    def showcell( self ):
        print(self.position )


class Gridworld:
    def __init__( self , world_size=(5, 5) ):
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show( self ):
        print(self.w )

    def get_neigbours( self , cell ):
        """Return the (up to 8) neighbouring cells that lie inside the grid."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                neighbour = Cell()
                neighbour.position = (x, y)
                neighbour.parent = cell
                neighbours.append(neighbour )
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start )

    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F'''path from {start.position} to {goal.position}''')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
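    # Added sanity check: the recovered path must start at the start cell and end at the goal.
    assert s[0] == start.position and s[-1] == goal.position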
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
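    # Example invocation (added; the script name and all file paths are placeholders):
    #   python convert_mobilebert_checkpoint.py --tf_checkpoint_path ./model.ckpt \
    #       --mobilebert_config_file ./config.json --pytorch_dump_path ./pytorch_model.bin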
| 67 | 1 |
"""simple docstring"""
def is_arithmetic_series(series):
    if not isinstance(series , list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(series ) == 0:
        raise ValueError("Input list must be a non empty list" )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series):
    if not isinstance(series , list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(series ) == 0:
        raise ValueError("Input list must be a non empty list" )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
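    # Added worked example: [2, 4, 6] has common difference 2 and mean (2 + 4 + 6) / 3 = 4.
    assert is_arithmetic_series([2, 4, 6])
    assert arithmetic_mean([2, 4, 6]) == 4.0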
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
    def _no_encoding_on_file_open( self , file_path ):
        with open(file_path , encoding="utf-8" ) as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match

    def _no_print_statements( self , file_path ):
        with open(file_path , encoding="utf-8" ) as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open( self ):
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )

    def test_no_print_statements( self ):
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
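# Added demonstration of the `open(...)` check above on two sample lines:
if __name__ == "__main__":
    _open_regex = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
    assert _open_regex.search(' open("data.txt")' ) is not None  # flagged: no encoding kwarg
    assert _open_regex.search(' open("data.txt", encoding="utf-8")' ) is None  # allowed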
| 67 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
super().__init__()
self.register_modules(
vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
def a__ ( self , lowerCamelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def a__ ( self ):
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self , lowerCamelCase , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 50 , lowerCamelCase = 7.5 , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = None , **lowerCamelCase , ):
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = len(lowerCamelCase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase , lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowerCamelCase )}." )
# get prompt text embeddings
__a = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__a = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__a = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a = text_embeddings.shape
__a = text_embeddings.repeat(1 , lowerCamelCase , 1 )
__a = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a = 42
if negative_prompt is None:
__a = [""]
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="
F" {type(lowerCamelCase )}." )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
__a = negative_prompt
__a = text_input_ids.shape[-1]
__a = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="pt" , )
__a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a = uncond_embeddings.shape[1]
__a = uncond_embeddings.repeat(lowerCamelCase , lowerCamelCase , 1 )
__a = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(self.device )
__a = torch.randn(lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(
self.device )
else:
__a = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
__a = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
__a = latents_reference.to(self.device )
__a = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a = (latents_shape[3] - latents_shape_reference[3]) // 2
__a = (latents_shape[2] - latents_shape_reference[2]) // 2
__a = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a = 0 if dx < 0 else dx
__a = 0 if dy < 0 else dy
__a = max(-dx , 0 )
__a = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__a = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
__a = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a = noise_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = 1 / 0.1_8215 * latents
__a = self.vae.decode(lowerCamelCase ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a = self.feature_extractor(self.numpy_to_pil(lowerCamelCase ) , return_tensors="pt" ).to(
self.device )
__a , __a = self.safety_checker(
images=lowerCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a = None
if output_type == "pil":
__a = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
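# Added sketch of the classifier-free guidance step used in the denoising loop
# above: guided = uncond + guidance_scale * (text - uncond). A guidance_scale
# of 1.0 reproduces the text-conditional prediction; larger values extrapolate
# further away from the unconditional one.
if __name__ == "__main__":
    import torch as _torch

    _uncond, _text = _torch.zeros(4 ), _torch.ones(4 )
    _guided = _uncond + 7.5 * (_text - _uncond)
    assert _torch.allclose(_guided , _torch.full((4,) , 7.5 ) )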
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
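# Added note: rich's `install(show_locals=False)` replaces Python's default
# traceback handler with rich's formatted one; passing show_locals=True would
# also print each frame's local variables in the rendered traceback.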
| 67 | 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class snake_case__ :
def a__ ( self , lowerCamelCase , lowerCamelCase ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
    def assert_almost_equals( self , a , b , tol ):
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , F"Difference between torch and flax is {diff} (>= {tol})." )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
__a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel(lowerCamelCase )
__a = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
__a , __a = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
__a = {"vision_model": vision_model, "text_model": text_model}
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
__a = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
__a , __a = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
__a = {"vision_model": vision_model, "text_model": text_model}
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
__a = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
__a = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
__a = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
__a = after_output[0]
__a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-3 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
__a , __a = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
__a = {"vision_model": vision_model, "text_model": text_model}
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
__a = model(
input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , output_attentions=lowerCamelCase )
__a = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = to_atuple(vision_model.config.image_size )
__a = to_atuple(vision_model.config.patch_size )
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__a = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__a = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
__a = inputs_dict
__a = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__a = pt_model(**lowerCamelCase ).to_tuple()
__a = fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
__a = fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
__a = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase , from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
__a = pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output_loaded.numpy() , 4E-2 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
__a = VisionTextDualEncoderModel(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel(lowerCamelCase )
__a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase )
__a = fx_state
self.check_pt_flax_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
__a = VisionTextDualEncoderModel(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel(lowerCamelCase )
__a = load_flax_weights_in_pytorch_model(lowerCamelCase , fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@is_pt_flax_cross_test
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a = config_inputs_dict.pop("vision_config" )
__a = config_inputs_dict.pop("text_config" )
__a = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.check_equivalence_flax_to_pt(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@slow
def a__ ( self ):
__a , __a = self.get_pretrained_model_and_inputs()
__a = model_a(**lowerCamelCase )
__a = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
__a = model_a(**lowerCamelCase )
__a = after_outputs[0]
__a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-5 )
@require_flax
class snake_case__ ( snake_case_, unittest.TestCase ):
def a__ ( self ):
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCamelCase , text_from_pt=lowerCamelCase , )
__a = 13
__a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__a = random_attention_mask([batch_size, 4] )
__a = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = FlaxViTModel(lowerCamelCase )
__a = FlaxBertModel(lowerCamelCase )
return vision_model, text_model
def a__ ( self ):
__a = FlaxViTModelTester(self )
__a = FlaxBertModelTester(self )
__a = vit_model_tester.prepare_config_and_inputs()
__a = bert_model_tester.prepare_config_and_inputs()
__a , __a = vision_config_and_inputs
__a , __a , __a , __a = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class snake_case__ ( snake_case_, unittest.TestCase ):
def a__ ( self ):
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCamelCase , text_from_pt=lowerCamelCase , )
__a = 13
__a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__a = random_attention_mask([batch_size, 4] )
__a = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = FlaxCLIPVisionModel(lowerCamelCase )
__a = FlaxBertModel(lowerCamelCase )
return vision_model, text_model
def a__ ( self ):
__a = FlaxCLIPVisionModelTester(self )
__a = FlaxBertModelTester(self )
__a = clip_model_tester.prepare_config_and_inputs()
__a = bert_model_tester.prepare_config_and_inputs()
__a , __a = vision_config_and_inputs
__a , __a , __a , __a = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
__a = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__a = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=lowerCamelCase , padding=lowerCamelCase , return_tensors="np" )
__a = model(**lowerCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__a = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCamelCase , atol=1E-3 ) )
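# Hedged sketch of the numeric check the equivalence tests above rely on:
# compare two framework outputs by their maximum absolute difference.
# `fx_out` and `pt_out` are illustrative placeholders.
import numpy as np

def max_abs_diff(fx_out, pt_out) -> float:
    return float(np.abs(np.asarray(fx_out) - np.asarray(pt_out)).max())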
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class snake_case__ :
def __init__( self ):
__a = []
__a = set()
def a__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def a__ ( self ):
return len(self.elements ) == 0
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def a__ ( self , lowerCamelCase ):
if item in self.set:
self.set.remove(lowerCamelCase )
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def a__ ( self ):
return self.elements[0][1]
def a__ ( self ):
((__a) , (__a)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
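# Hedged sketch of the priority-update pattern used in the class above, written
# against plain heapq: pop entries into a buffer until the target item is found,
# then push everything back with the new priority. Assumes the item is present,
# as the class above does.
import heapq

def update_priority(heap: list, item, new_priority) -> None:
    buffer = []
    priority, x = heapq.heappop(heap)
    while x != item:
        buffer.append((priority, x))
        priority, x = heapq.heappop(heap)
    buffer.append((new_priority, item))
    for pri, val in buffer:
        heapq.heappush(heap, (pri, val))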
def _lowerCamelCase( a , a ):
# euclidean distance
__a = np.array(a )
__a = np.array(a )
return np.linalg.norm(a - b )
def _lowerCamelCase( a , a ):
# integer division by time variable
return consistent_heuristic(a , a ) // t
def _lowerCamelCase( a , a ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
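# Illustrative sanity values for the heuristics above: for p = (0, 0) and
# goal = (3, 4) the Euclidean distance is sqrt(3**2 + 4**2) == 5.0 and the
# Manhattan distance is abs(3) + abs(4) == 7; the middle heuristic floors the
# Euclidean value by integer division with the global time counter t.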
def _lowerCamelCase( a , a , a , a ):
__a = g_function[start] + Wa * heuristics[i](a , a )
return ans
def _lowerCamelCase( a , a , a ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def _lowerCamelCase( a ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase( a , a , a , a , a , a , a , a , ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def _lowerCamelCase( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
SCREAMING_SNAKE_CASE__:Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
SCREAMING_SNAKE_CASE__:str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
SCREAMING_SNAKE_CASE__:int = make_common_ground()
SCREAMING_SNAKE_CASE__:List[str] = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE__:str = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 20
SCREAMING_SNAKE_CASE__:Dict = 3 # one consistent heuristic and two inconsistent ones
# start and end destination
SCREAMING_SNAKE_CASE__:Dict = (0, 0)
SCREAMING_SNAKE_CASE__:Optional[Any] = (n - 1, n - 1)
SCREAMING_SNAKE_CASE__:List[str] = 1
def _lowerCamelCase( a , a , a ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def _lowerCamelCase( a , a , a ):
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(a , 2 ) - pow(a , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(a , 2 ) - pow(a , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(a , 2 ) + pow(a , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
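# Worked check of the Pythagorean relations the function above encodes,
# independent of the obfuscated argument names: a series circuit with R = 3 and
# X = 4 has Z = sqrt(3**2 + 4**2) == 5.0, and conversely R = sqrt(5**2 - 4**2) == 3.0.
from math import sqrt

assert sqrt(3**2 + 4**2) == 5.0  # impedance from resistance and reactance
assert sqrt(5**2 - 4**2) == 3.0  # resistance from impedance and reactance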
| 67 | """simple docstring"""
SCREAMING_SNAKE_CASE__:Any = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCamelCase( a ):
__a = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
__a = Stack()
__a = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a ) )
elif i in operators:
# RULE 2
operator_stack.push(a )
elif i == ")":
# RULE 4
__a = operator_stack.peek()
operator_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operators[opr](a , a )
operand_stack.push(a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
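# Hedged trace of the two-stack evaluation above on the sub-expression "(2 + 3)":
# '2' and '3' are pushed to the operand stack (rule 1), '+' to the operator stack
# (rule 2); the ')' pops both operands and the operator, applies op.add, and
# pushes 5 back onto the operand stack (rule 4). The outermost value is what
# rule 5 finally returns.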
| 67 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
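# Worked example of the shortest-edge rule in the resize method above: a request
# of {"shortest_edge": 224} is first scaled to int((256 / 224) * 224) == 256, and
# the other edge then follows the input aspect ratio via get_resize_output_image_size.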
| 67 | """simple docstring"""
from math import pi
def _lowerCamelCase( a , a ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
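# Worked check: a 90 degree arc of a circle with radius 10 covers a quarter of
# the circumference, 2 * pi * 10 * (90 / 360) ~= 15.7080, which is what the call
# above prints.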
| 67 | 1 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _lowerCamelCase( a , a , a=[] ):
__a = size[0] - overlap_pixels * 2
__a = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__a = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_5_5
__a = np.pad(a , mode="linear_ramp" , pad_width=a , end_values=0 )
if "l" in remove_borders:
__a = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__a = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__a = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__a = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def _lowerCamelCase( a , a , a ):
return max(a , min(a , a ) )
def _lowerCamelCase( a , a , a ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _lowerCamelCase( a , a , a ):
__a = list(a )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__a = clamp_rect(a , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _lowerCamelCase( a , a , a , a ):
__a = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(a , (original_slice, 0) )
return result
def _lowerCamelCase( a , a ):
__a = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__a = tile.crop(a )
return tile
def _lowerCamelCase( a , a ):
__a = n % d
return n - divisor
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 350 , ):
super().__init__(
vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , low_res_scheduler=lowerCamelCase , scheduler=lowerCamelCase , max_noise_level=lowerCamelCase , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
torch.manual_seed(0 )
__a = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__a = add_overlap_rect(lowerCamelCase , lowerCamelCase , image.size )
__a = image.crop(lowerCamelCase )
__a = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__a = translated_slice_x - (original_image_slice / 2)
__a = max(0 , lowerCamelCase )
__a = squeeze_tile(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = to_input.size
__a = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__a = super(lowerCamelCase , self ).__call__(image=lowerCamelCase , **lowerCamelCase ).images[0]
__a = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__a = unsqueeze_tile(lowerCamelCase , lowerCamelCase )
__a = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__a = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
__a = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=lowerCamelCase ) , mode="L" , )
final_image.paste(
lowerCamelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , lowerCamelCase )
@torch.no_grad()
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 75 , lowerCamelCase = 9.0 , lowerCamelCase = 50 , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 128 , lowerCamelCase = 32 , lowerCamelCase = 32 , ):
__a = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
__a = math.ceil(image.size[0] / tile_size )
__a = math.ceil(image.size[1] / tile_size )
__a = tcx * tcy
__a = 0
for y in range(lowerCamelCase ):
for x in range(lowerCamelCase ):
self._process_tile(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , prompt=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , noise_level=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def _lowerCamelCase( ):
# Run a demo
__a = "stabilityai/stable-diffusion-x4-upscaler"
__a = StableDiffusionTiledUpscalePipeline.from_pretrained(a , revision="fp16" , torch_dtype=torch.floataa )
__a = pipe.to("cuda" )
__a = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(a ):
print(F"progress: {obj['progress']:.4f}" )
obj["image"].save("diffusers_library_progress.jpg" )
__a = pipe(image=a , prompt="Black font, white background, vector" , noise_level=4_0 , callback=a )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
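# Hedged sketch of the tiling arithmetic the pipeline above iterates over: the
# image is split into ceil(w / tile) x ceil(h / tile) tiles and each crop is
# grown by `overlap` pixels, clamped to the image bounds (cf. add_overlap_rect).
# The function name and signature are illustrative assumptions.
import math

def tile_rects(width: int, height: int, tile: int, overlap: int):
    for ty in range(math.ceil(height / tile)):
        for tx in range(math.ceil(width / tile)):
            x0 = max(tx * tile - overlap, 0)
            y0 = max(ty * tile - overlap, 0)
            x1 = min((tx + 1) * tile + overlap, width)
            y1 = min((ty + 1) * tile + overlap, height)
            yield (x0, y0, x1, y1)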
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
if not isinstance(a , a ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(a , a ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__a = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(a )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
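# Example: with number = 1 and iterations = 7 the function above returns
# "1 2 Fizz 4 Buzz Fizz 7 " (multiples of 3 become Fizz, of 5 Buzz, of both FizzBuzz).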
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
        __a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
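# The seq_length used by the tester above follows the ViT patch arithmetic:
# num_patches = (image_size // patch_size) ** 2 plus one [CLS] token, e.g.
# (30 // 2) ** 2 + 1 == 226 for the tester defaults.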
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
return number | (1 << position)
def _lowerCamelCase( a , a ):
return number & ~(1 << position)
def _lowerCamelCase( a , a ):
return number ^ (1 << position)
def _lowerCamelCase( a , a ):
return ((number >> position) & 1) == 1
def _lowerCamelCase( a , a ):
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
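# Worked checks for the bit helpers above (all defined under the obfuscated name
# _lowerCamelCase; originally set_bit, clear_bit, flip_bit, is_bit_set, get_bit):
# 0b1101 | (1 << 1) == 0b1111 (set),  0b1111 & ~(1 << 1) == 0b1101 (clear),
# 0b1101 ^ (1 << 1) == 0b1111 (flip), (0b1101 >> 2) & 1 == 1 (test).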
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
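# Worked example of the aspect-preserving resize in get_expected_values above:
# a 30 x 40 (h x w) image with shortest_edge = 18 maps to height 18 and
# width int(18 * 40 / 30) == 24.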
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:str = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__:int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
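# Illustration (not part of the original dataset row): a minimal sketch of the
# lazy-import pattern that _LazyModule implements, written with module-level
# __getattr__ (PEP 562). Meant to live in a package __init__; names hypothetical.
import importlib

_LAZY_ATTRS = {"ViTMAEModel": ".modeling_vit_mae"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")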
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( snake_case_ ):
_snake_case : List[Any] = (PNDMScheduler,)
_snake_case : List[Any] = (("""num_inference_steps""", 50),)
def a__ ( self , **lowerCamelCase ):
__a = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCamelCase )
return config
def a__ ( self , lowerCamelCase=0 , **lowerCamelCase ):
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("num_inference_steps" , lowerCamelCase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**lowerCamelCase )
__a = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
__a = scheduler_class.from_pretrained(lowerCamelCase )
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
__a = scheduler.step_prk(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
__a = new_scheduler.step_prk(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step_plms(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
__a = new_scheduler.step_plms(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a__ ( self ):
pass
def a__ ( self , lowerCamelCase=0 , **lowerCamelCase ):
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("num_inference_steps" , lowerCamelCase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
__a = scheduler_class.from_pretrained(lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
__a = dummy_past_residuals[:]
__a = scheduler.step_prk(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
__a = new_scheduler.step_prk(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step_plms(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
__a = new_scheduler.step_plms(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a__ ( self , **lowerCamelCase ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**lowerCamelCase )
__a = scheduler_class(**lowerCamelCase )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
__a = model(lowerCamelCase , lowerCamelCase )
__a = scheduler.step_prk(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__a = model(lowerCamelCase , lowerCamelCase )
__a = scheduler.step_plms(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
return sample
def a__ ( self ):
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("num_inference_steps" , lowerCamelCase )
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**lowerCamelCase )
__a = self.dummy_sample
__a = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase , "set_timesteps" ):
scheduler.set_timesteps(lowerCamelCase )
elif num_inference_steps is not None and not hasattr(lowerCamelCase , "set_timesteps" ):
__a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__a = dummy_past_residuals[:]
__a = scheduler.step_prk(lowerCamelCase , 0 , lowerCamelCase , **lowerCamelCase ).prev_sample
__a = scheduler.step_prk(lowerCamelCase , 1 , lowerCamelCase , **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__a = scheduler.step_plms(lowerCamelCase , 0 , lowerCamelCase , **lowerCamelCase ).prev_sample
__a = scheduler.step_plms(lowerCamelCase , 1 , lowerCamelCase , **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a__ ( self ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def a__ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase )
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(steps_offset=1 )
__a = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def a__ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def a__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def a__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def a__ ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase )
def a__ ( self ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase )
def a__ ( self ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
__a = 27
for scheduler_class in self.scheduler_classes:
__a = self.dummy_sample
__a = 0.1 * sample
__a = self.get_scheduler_config()
__a = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__a = scheduler.step_prk(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
def a__ ( self ):
with self.assertRaises(lowerCamelCase ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**lowerCamelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def a__ ( self ):
__a = self.full_loop()
__a = torch.sum(torch.abs(lowerCamelCase ) )
__a = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def a__ ( self ):
__a = self.full_loop(prediction_type="v_prediction" )
__a = torch.sum(torch.abs(lowerCamelCase ) )
__a = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def a__ ( self ):
# We specify different beta, so that the first alpha is 0.99
__a = self.full_loop(set_alpha_to_one=lowerCamelCase , beta_start=0.01 )
__a = torch.sum(torch.abs(lowerCamelCase ) )
__a = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def a__ ( self ):
# We specify different beta, so that the first alpha is 0.99
__a = self.full_loop(set_alpha_to_one=lowerCamelCase , beta_start=0.01 )
__a = torch.sum(torch.abs(lowerCamelCase ) )
__a = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
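# Illustration (not part of the original dataset row): a minimal sketch of the
# denoising loop these tests exercise, assuming diffusers' public PNDMScheduler
# API; the random tensors stand in for a latent and a UNet prediction.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)
    sample = scheduler.step(model_output, t, sample).prev_sample  # PRK warmup, then PLMS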
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
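# Illustration (not part of the original dataset row): the processor above mirrors
# CLIP-style preprocessing (resize shortest edge, center crop, rescale, normalize);
# a hedged sketch of equivalent usage through transformers' public CLIPImageProcessor.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])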
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__:Any = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
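# Illustration (not part of the original dataset row): a hedged sketch of the
# add_prefix_space handling above, via the public GPT-2 fast tokenizer (weights
# and tokenizer files are downloaded from the Hub on first run).
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)  # pretokenized input is allowed
print(enc["input_ids"])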
| 67 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:List[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = """dpr"""
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=0 , lowerCamelCase="absolute" , lowerCamelCase = 0 , **lowerCamelCase , ):
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = projection_dim
__a = position_embedding_type
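# Illustration (not part of the original dataset row): building the equivalent
# public config object; field names follow transformers' DPRConfig, where a
# projection_dim > 0 adds a projection layer on top of the pooled output.
from transformers import DPRConfig

config = DPRConfig(hidden_size=768, num_hidden_layers=12, projection_dim=128)
print(config.projection_dim)  # 128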
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
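# Illustration (not part of the original dataset row): the exact URL shape the
# assertion above checks, reproduced with the standard library only.
from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
url = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
print(url)  # ...dataset-name/resolve/main/filename%20with%20blanks.csv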
| 67 | 1 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase = "▁" , lowerCamelCase = True , lowerCamelCase = "<unk>" , lowerCamelCase = "</s>" , lowerCamelCase = "<pad>" , ):
__a = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
__a = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__a = token_dict["token"]
__a = Tokenizer(Unigram() )
__a = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
__a = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=lowerCamelCase , add_prefix_space=lowerCamelCase ),
pre_tokenizers.Digits(individual_digits=lowerCamelCase ),
pre_tokenizers.Punctuation(),
] )
__a = decoders.Metaspace(replacement=lowerCamelCase , add_prefix_space=lowerCamelCase )
__a = TemplateProcessing(
single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
__a = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(lowerCamelCase , lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = 8000 , lowerCamelCase = True , ):
__a = trainers.UnigramTrainer(
vocab_size=lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase , )
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = [files]
self._tokenizer.train(lowerCamelCase , trainer=lowerCamelCase )
self.add_unk_id()
def a__ ( self , lowerCamelCase , lowerCamelCase = 8000 , lowerCamelCase = True , ):
__a = trainers.UnigramTrainer(
vocab_size=lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase , )
self._tokenizer.train_from_iterator(lowerCamelCase , trainer=lowerCamelCase )
self.add_unk_id()
def a__ ( self ):
__a = json.loads(self._tokenizer.to_str() )
__a = self.special_tokens["unk"]["id"]
__a = Tokenizer.from_str(json.dumps(lowerCamelCase ) )
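# Illustration (not part of the original dataset row): a minimal sketch of training
# a Unigram model with the same `tokenizers` primitives the class wraps; the corpus,
# vocab size, and special tokens here are arbitrary stand-ins.
from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram
from tokenizers.pre_tokenizers import Whitespace

corpus = ["this is a sentence", "this is another sentence", "one more line of text", "and a final line"]
tok = Tokenizer(Unigram())
tok.pre_tokenizer = Whitespace()
trainer = trainers.UnigramTrainer(vocab_size=48, special_tokens=["<pad>", "</s>", "<unk>"], unk_token="<unk>")
tok.train_from_iterator(corpus, trainer=trainer)
print(tok.encode("this is a test").tokens)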
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
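# Illustration (not part of the original dataset row): the recurrence splits
# [left, right] at mid = (left + right) >> 1 and keeps the larger half-maximum;
# a self-contained check of the same logic under an assumed name.
def _find_max(nums, left, right):
    if left == right:
        return nums[left]
    mid = (left + right) >> 1
    return max(_find_max(nums, left, mid), _find_max(nums, mid + 1, right))

assert _find_max([3, 1, 9, 4], 0, 3) == 9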
| 67 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _lowerCamelCase( a ):
__a = args.pruning_method
__a = args.threshold
__a = args.model_name_or_path.rstrip("/" )
__a = args.target_model_path
print(F"Load fine-pruned model from {model_name_or_path}" )
__a = torch.load(os.path.join(a , "pytorch_model.bin" ) )
__a = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
__a = tensor
print(F"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
__a = tensor
print(F"Copied layer {name}" )
elif "bias" in name:
__a = tensor
print(F"Copied layer {name}" )
else:
if pruning_method == "magnitude":
__a = MagnitudeBinarizer.apply(inputs=a , threshold=a )
__a = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
__a = name[:-6]
__a = model[F"{prefix_}mask_scores"]
__a = TopKBinarizer.apply(a , a )
__a = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
__a = name[:-6]
__a = model[F"{prefix_}mask_scores"]
__a = ThresholdBinarizer.apply(a , a , a )
__a = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
__a = name[:-6]
__a = model[F"{prefix_}mask_scores"]
__a , __a = -0.1, 1.1
__a = torch.sigmoid(a )
__a = s * (r - l) + l
__a = s_bar.clamp(min=0.0 , max=1.0 )
__a = tensor * mask
print(F"Pruned layer {name}" )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
__a = os.path.join(
os.path.dirname(a ) , F"bertarized_{os.path.basename(a )}" )
if not os.path.isdir(a ):
shutil.copytree(a , a )
print(F"\nCreated folder {target_model_path}" )
torch.save(a , os.path.join(a , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
main(args)
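# Illustration (not part of the original dataset row): a plain-torch approximation
# of the "topK" branch above -- keep the top fraction of weights by |score| and
# zero the rest. TopKBinarizer comes from the research code, so this is a sketch
# of the idea, not its exact implementation.
import torch

def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    k = max(1, int(keep_ratio * scores.numel()))
    cutoff = scores.abs().flatten().kthvalue(scores.numel() - k + 1).values
    return (scores.abs() >= cutoff).to(scores.dtype)

weights = torch.randn(4, 4)
pruned = weights * topk_mask(weights, keep_ratio=0.25)  # ~75% of entries zeroed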
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
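# Illustration (not part of the original dataset row): instantiating the equivalent
# public config; with attention_type="block_sparse", queries attend within blocks
# of block_size tokens plus num_random_blocks random blocks per query block.
from transformers import BigBirdConfig

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(config.attention_type, config.block_size)  # block_sparse 64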
| 67 | 1 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowerCamelCase( a , a , a=1_0_2_4 , a=1_0_2_4 , a=False , **a ):
__a = AutoTokenizer.from_pretrained(a )
__a = SeqaSeqDataset(a , a , a , a , type_path="train" , **a )
__a = tok.pad_token_id
def get_lens(a ):
__a = tqdm(
DataLoader(a , batch_size=5_1_2 , num_workers=8 , shuffle=a , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__a = []
for batch in dl:
__a = batch["input_ids"].ne(a ).sum(1 ).tolist()
__a = batch["labels"].ne(a ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(a , a ):
max_lens.append(max(a , a ) )
else:
max_lens.extend(a )
return max_lens
__a = get_lens(a )
__a = SeqaSeqDataset(a , a , a , a , type_path="val" , **a )
__a = get_lens(a )
pickle_save(a , train_ds.len_file )
pickle_save(a , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a ):
__a = generate_pascal_triangle(a )
for row_idx in range(a ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def _lowerCamelCase( a ):
if not isinstance(a , a ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
__a = []
for current_row_idx in range(a ):
__a = populate_current_row(a , a )
triangle.append(a )
return triangle
def _lowerCamelCase( a , a ):
__a = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
__a , __a = 1, 1
for current_col_idx in range(1 , a ):
calculate_current_element(
a , a , a , a )
return current_row
def _lowerCamelCase( a , a , a , a , ):
__a = triangle[current_row_idx - 1][current_col_idx - 1]
__a = triangle[current_row_idx - 1][current_col_idx]
__a = above_to_left_elt + above_to_right_elt
def _lowerCamelCase( a ):
if not isinstance(a , a ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
__a = [[1]]
for row_index in range(1 , a ):
__a = [0] + result[-1] + [0]
__a = row_index + 1
# Calculate the number of distinct elements in a row
__a = sum(divmod(a , 2 ) )
__a = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
__a = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
__a = row_first_half + row_second_half
result.append(a )
return result
def _lowerCamelCase( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(a , a ) -> None:
__a = F"{func.__name__}({value})"
__a = timeit(F"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(a , a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
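# Illustration (not part of the original dataset row): every Pascal-triangle entry
# is a binomial coefficient, so any generated row can be checked against math.comb.
import math

row_4 = [math.comb(4, k) for k in range(5)]
print(row_4)  # [1, 4, 6, 4, 1]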
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
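# Illustration (not part of the original dataset row): the fractional pixel
# coordinates above map to ray directions through tan(fov / 2); a minimal numpy
# sketch for a single pixel, assuming canonical camera axes (x right, y down,
# z forward).
import numpy as np

width, height, fov = 64, 64, 0.7
px, py = 32, 16  # hypothetical pixel
fx = (px / (width - 1)) * 2 - 1   # fractional coordinate in [-1, 1]
fy = (py / (height - 1)) * 2 - 1
direction = np.array([fx * np.tan(fov / 2), fy * np.tan(fov / 2), 1.0])
direction /= np.linalg.norm(direction)  # unit ray direction from the camera origin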
| 67 | 1 |
"""simple docstring"""
from math import pi
def _lowerCamelCase( a , a ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
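# Illustration (not part of the original dataset row): arc length is the
# circumference scaled by the swept angle, so arc_length(90, 10) equals
# 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.708.
from math import pi

assert abs(2 * pi * 10 * (90 / 360) - 5 * pi) < 1e-12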
| 67 | """simple docstring"""
def _lowerCamelCase( a ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def _lowerCamelCase( a ):
__a = 0
__a = number
while duplicate > 0:
__a , __a = divmod(a , 1_0 )
fact_sum += factorial(a )
return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
SCREAMING_SNAKE_CASE__:Optional[Any] = int(input("""Enter number: """).strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
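# Illustration (not part of the original dataset row): 145 is a Krishnamurthy
# number because 1! + 4! + 5! = 1 + 24 + 120 = 145; in base 10 the only such
# numbers are 1, 2, 145, and 40585.
from math import factorial

assert sum(factorial(int(d)) for d in "145") == 145
assert sum(factorial(int(d)) for d in "40585") == 40585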
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
_validate_point(a )
_validate_point(a )
if len(a ) != len(a ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(a , a ) ) )
def _lowerCamelCase( a ):
if point:
if isinstance(a , a ):
for item in point:
if not isinstance(a , (int, float) ):
__a = (
"Expected a list of numbers as input, found "
F"{type(a ).__name__}"
)
raise TypeError(a )
else:
__a = F"Expected a list of numbers as input, found {type(a ).__name__}"
raise TypeError(a )
else:
raise ValueError("Missing an input" )
def _lowerCamelCase( a , a ):
_validate_point(a )
_validate_point(a )
if len(a ) != len(a ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(a , a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
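# Illustration (not part of the original dataset row): both functions above compute
# the L1 (Manhattan) distance, e.g. |1-2| + |1-3| + |1-4| = 6 in one line.
assert sum(abs(a - b) for a, b in zip([1, 1, 1], [2, 3, 4])) == 6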
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Dict = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """ibert"""
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase="absolute" , lowerCamelCase=False , lowerCamelCase="none" , **lowerCamelCase , ):
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = quant_mode
__a = force_dequant
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | """simple docstring"""
import argparse
from omegaconf import OmegaConf  # the package exposes OmegaConf from the lowercase omegaconf module
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _lowerCamelCase( a , a , a ):
__a = OmegaConf.load(a )
__a = torch.load(a , map_location="cpu" )["model"]
__a = list(state_dict.keys() )
# extract state_dict for VQVAE
__a = {}
__a = "first_stage_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
# extract state_dict for UNetLDM
__a = {}
__a = "model.diffusion_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
__a = config.model.params.first_stage_config.params
__a = config.model.params.unet_config.params
__a = VQModel(**a ).eval()
vqvae.load_state_dict(a )
__a = UNetLDMModel(**a ).eval()
unet.load_state_dict(a )
__a = DDIMScheduler(
num_train_timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=a , )
__a = LDMPipeline(a , a , a )
pipeline.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
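# Illustration (not part of the original dataset row): the two extraction loops
# above reduce to prefix filtering over a state dict, shown here on toy keys.
state_dict = {"first_stage_model.encoder.w": 1, "model.diffusion_model.in.w": 2}
prefix = "first_stage_model."
vqvae_sd = {k: v for k, v in state_dict.items() if k.startswith(prefix)}
print(vqvae_sd)  # {'first_stage_model.encoder.w': 1}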
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
| 67 | 1 |
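# A minimal sketch (illustrative, not part of the config above) of the past_key_values
# shape convention the dummy-input generators above rely on: each layer caches tensors
# of shape (batch, num_heads, seq_len, head_dim). The numbers below are made-up assumptions.
import torch
batch, num_heads, seq_len, d_model = 2, 8, 16, 512
past_key = torch.zeros(batch, num_heads, seq_len, d_model // num_heads)
assert past_key.shape == (2, 8, 16, 64)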
"""simple docstring"""
import string
def decrypt( message ):
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main( ):
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
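    # Small illustrative check (example input assumed): "KHOOR" is "HELLO" shifted
    # by 3, so the brute force prints "Decryption using Key #3: HELLO" among its
    # 26 candidates.
    decrypt("KHOOR")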
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 67 | 1 |
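# A self-contained sketch (illustrative, independent of the tests above) of the
# cache-consistency property they verify: the last-token output computed with
# past_key_values must match the corresponding slice of the full forward pass.
import torch
full_out = torch.randn(1, 5, 8)            # stand-in for model(input_ids).last_hidden_state
cached_out = full_out[:, -1:, :].clone()   # stand-in for model(next_token, past_key_values=past)
assert torch.allclose(full_out[:, -1, :], cached_out[:, 0, :], atol=1e-3)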
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class snake_case__ ( snake_case_ ):
_snake_case : jnp.ndarray
@flax_register_to_config
class snake_case__ ( nn.Module, snake_case_, snake_case_ ):
_snake_case : int = 32
_snake_case : int = 4
_snake_case : int = 4
_snake_case : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_snake_case : Union[bool, Tuple[bool]] = False
_snake_case : Tuple[int] = (320, 640, 1_280, 1_280)
_snake_case : int = 2
_snake_case : Union[int, Tuple[int]] = 8
_snake_case : Optional[Union[int, Tuple[int]]] = None
_snake_case : int = 1_280
_snake_case : float = 0.0
_snake_case : bool = False
_snake_case : jnp.dtype = jnp.floataa
_snake_case : bool = True
_snake_case : int = 0
_snake_case : bool = False
def a__ ( self , lowerCamelCase ):
# init input tensors
__a = (1, self.in_channels, self.sample_size, self.sample_size)
__a = jnp.zeros(lowerCamelCase , dtype=jnp.floataa )
__a = jnp.ones((1,) , dtype=jnp.intaa )
__a = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__a , __a = jax.random.split(lowerCamelCase )
__a = {"params": params_rng, "dropout": dropout_rng}
return self.init(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )["params"]
def a__ ( self ):
__a = self.block_out_channels
__a = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__a = self.num_attention_heads or self.attention_head_dim
# input
__a = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__a = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__a = FlaxTimestepEmbedding(lowerCamelCase , dtype=self.dtype )
__a = self.only_cross_attention
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = (num_attention_heads,) * len(self.down_block_types )
# down
__a = []
__a = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__a = output_channel
__a = block_out_channels[i]
__a = i == len(lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__a = FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__a = FlaxDownBlockaD(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCamelCase )
__a = down_blocks
# mid
__a = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__a = []
__a = list(reversed(lowerCamelCase ) )
__a = list(reversed(lowerCamelCase ) )
__a = list(reversed(lowerCamelCase ) )
__a = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__a = output_channel
__a = reversed_block_out_channels[i]
__a = reversed_block_out_channels[min(i + 1 , len(lowerCamelCase ) - 1 )]
__a = i == len(lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__a = FlaxCrossAttnUpBlockaD(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , prev_output_channel=lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__a = FlaxUpBlockaD(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , prev_output_channel=lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowerCamelCase )
__a = output_channel
__a = up_blocks
# out
__a = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__a = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase = True , lowerCamelCase = False , ):
# 1. time
if not isinstance(lowerCamelCase , jnp.ndarray ):
__a = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__a = timesteps.astype(dtype=jnp.floataa )
__a = jnp.expand_dims(lowerCamelCase , 0 )
__a = self.time_proj(lowerCamelCase )
__a = self.time_embedding(lowerCamelCase )
# 2. pre-process
__a = jnp.transpose(lowerCamelCase , (0, 2, 3, 1) )
__a = self.conv_in(lowerCamelCase )
# 3. down
__a = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase , lowerCamelCase ):
__a , __a = down_block(lowerCamelCase , lowerCamelCase , lowerCamelCase , deterministic=not train )
else:
__a , __a = down_block(lowerCamelCase , lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__a = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowerCamelCase , lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__a = new_down_block_res_samples
# 4. mid
__a = self.mid_block(lowerCamelCase , lowerCamelCase , lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__a = down_block_res_samples[-(self.layers_per_block + 1) :]
__a = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = up_block(
lowerCamelCase , temb=lowerCamelCase , encoder_hidden_states=lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , deterministic=not train , )
else:
__a = up_block(lowerCamelCase , temb=lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , deterministic=not train )
# 6. post-process
__a = self.conv_norm_out(lowerCamelCase )
__a = nn.silu(lowerCamelCase )
__a = self.conv_out(lowerCamelCase )
__a = jnp.transpose(lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowerCamelCase )
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , mobilebert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--mobilebert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained MobileBERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
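# Illustrative invocation (assuming this file is saved as convert_mobilebert.py;
# all paths below are placeholders, not real checkpoints):
# python convert_mobilebert.py \
#     --tf_checkpoint_path ./mobilebert/model.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert/pytorch_model.bin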
| 67 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    main()
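# Illustrative invocation (assuming this launcher is saved as xla_spawn.py; the
# training script name and its arguments are placeholders):
# python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5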
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__a = input_file.read()
__a = regexp.search(lowerCamelCase )
return match
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__a = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a = regexp.finditer(lowerCamelCase )
__a = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 1 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        """repo_name""",
        """path""",
        """copies""",
        """size""",
        """content""",
        """license""",
        """hash""",
        """line_mean""",
        """line_max""",
        """alpha_frac""",
        """autogenerated""",
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
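# A tiny illustration (independent of the script above) of the chars-per-token
# ratio that `tokenize` records per example; the token count is a made-up assumption.
example_content = "def foo(): a"   # 12 characters
fake_token_ids = [1, 2, 3, 4]      # pretend the tokenizer produced 4 tokens
assert len(example_content) / len(fake_token_ids) == 3.0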
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | 1 |
"""simple docstring"""
def binary_xor( a , b ):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
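# Worked example for binary_xor above: 25 = 0b11001 and 32 = 0b100000, so after
# zero-padding the bitwise XOR is 0b111001 (decimal 57).
assert binary_xor(25, 32) == "0b111001"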
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class snake_case__ :
def __init__( self ):
__a = []
__a = set()
def a__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def a__ ( self ):
return len(self.elements ) == 0
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def a__ ( self , lowerCamelCase ):
if item in self.set:
self.set.remove(lowerCamelCase )
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def a__ ( self ):
return self.elements[0][1]
def a__ ( self ):
((__a) , (__a)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
def _lowerCamelCase( a , a ):
# euclidean distance
__a = np.array(a )
__a = np.array(a )
return np.linalg.norm(a - b )
def _lowerCamelCase( a , a ):
# integer division by time variable
return consistent_heuristic(a , a ) // t
def _lowerCamelCase( a , a ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCamelCase( a , a , a , a ):
__a = g_function[start] + Wa * heuristics[i](a , a )
return ans
def _lowerCamelCase( a , a , a ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def _lowerCamelCase( a ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase( a , a , a , a , a , a , a , a , ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def _lowerCamelCase( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
SCREAMING_SNAKE_CASE__:Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
SCREAMING_SNAKE_CASE__:str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
SCREAMING_SNAKE_CASE__:int = make_common_ground()
SCREAMING_SNAKE_CASE__:List[str] = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE__:str = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 20
SCREAMING_SNAKE_CASE__:Dict = 3 # one consistent and two other inconsistent
# start and end destination
SCREAMING_SNAKE_CASE__:Dict = (0, 0)
SCREAMING_SNAKE_CASE__:Optional[Any] = (n - 1, n - 1)
SCREAMING_SNAKE_CASE__:List[str] = 1
def _lowerCamelCase( a , a , a ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
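# A small numeric illustration (state and g-value are made up) of the priority used
# above, key(s, i) = g[s] + W1 * h_i(s, goal): with g[s] = 4, W1 = 1 and the
# Manhattan heuristic, a state at (2, 3) with goal (19, 19) gets 4 + (17 + 16) = 37.
assert 4 + 1 * (abs(2 - 19) + abs(3 - 19)) == 37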
| 67 | 1 |
"""simple docstring"""
def factorial( digit ):
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy( number ):
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 1_0 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Krishnamurthy Number or not.""")
    number = int(input("""Enter number: """).strip())
    print(
        F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
    )
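# Worked check for the predicate above: 145 is a Krishnamurthy number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, while 144 is not (1 + 24 + 24 = 49).
assert krishnamurthy(145) and not krishnamurthy(144)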
| 67 | """simple docstring"""
__author__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation ):
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
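# Illustrative trace of the run above: scanning "(5 + ((4 * 2) * (2 + 3)))", the
# first ")" reduces 4 * 2 -> 8, the second reduces 2 + 3 -> 5, the third reduces
# 8 * 5 -> 40, and the outer ")" reduces 5 + 40 -> 45, which RULE 5 returns.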
| 67 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__( self ):
        self.node_position = []
    def get_position( self , vertex ):
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
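# A compact, independent sketch of the same idea using the standard-library heap
# (illustrative; it does not reuse the helper class above, and the graph is made up).
import heapq
def prim_mst(adj, start=0):
    # adj maps vertex -> list of (neighbor, weight); returns the MST edge list
    visited = {start}
    frontier = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(frontier)
    mst = []
    while frontier and len(visited) < len(adj):
        w, u, v = heapq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v))
        for nxt, nw in adj[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (nw, v, nxt))
    return mst
assert prim_mst({0: [(1, 1), (2, 4)], 1: [(0, 1), (2, 2)], 2: [(0, 4), (1, 2)]}) == [(0, 1), (1, 2)]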
| 67 | """simple docstring"""
from math import pi
def arc_length( angle , radius ):
    return 2 * pi * radius * (angle / 360 )
if __name__ == "__main__":
    print(arc_length(90, 10))
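# Worked arithmetic for the call above: a 90 degree arc of a circle with radius 10
# spans a quarter of the circumference, 2 * pi * 10 * (90 / 360) = 5 * pi ~ 15.708.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9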
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_nllb_moe""": [
        """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """NllbMoeConfig""",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
        """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NllbMoeForConditionalGeneration""",
        """NllbMoeModel""",
        """NllbMoePreTrainedModel""",
        """NllbMoeTop2Router""",
        """NllbMoeSparseMLP""",
    ]
if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
def least_divisible_repunit( divisor ):
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (1_0 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution( limit = 1_000_000 ):
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
    print(F'''{solution() = }''')
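# Small check for least_divisible_repunit above: R(6) = 111111 = 7 * 15873 and no
# shorter repunit is divisible by 7, so the function returns 6 for divisor 7.
assert least_divisible_repunit(7) == 6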
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
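# Shape sanity check for the interpolation above (a sketch, not part of the test):
# DINO ViT-S/8 uses patch_size=8, so a 480x480 input gives (480 // 8) ** 2 = 3600
# patches; adding the [CLS] token yields the 3601 tokens asserted in the shape above.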
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase( a , a , a , a , a , a ):
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
__a = "lm_head"
__a = getattr(a , a )
if weight_type is not None:
__a = getattr(a , a ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a , a ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(a )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "bias" in name:
__a = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a = "weight"
else:
__a = None
set_recursively(a , a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
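# Illustration of the "*" wildcard handling above (hypothetical key, for clarity only):
# a fairseq name such as "encoder.layers.3.self_attn.k_proj.weight" matches the mapping
# key "self_attn.k_proj"; the layer index "3" is recovered via
# name.split(key)[0].split(".")[-2] and substituted for "*", giving
# "unispeech.encoder.layers.3.attention.k_proj" with weight_type "weight".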
def _lowerCamelCase( a , a , a , a , a ):
__a = full_name.split("conv_layers." )[-1]
__a = name.split("." )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a )
@torch.no_grad()
def _lowerCamelCase( a , a , a=None , a=None , a=True ):
if config_path is not None:
__a = UniSpeechConfig.from_pretrained(a )
else:
__a = UniSpeechConfig()
if is_finetuned:
if dict_path:
__a = Dictionary.load_from_json(a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a = target_dict.pad_index
__a = target_dict.bos_index
__a = target_dict.eos_index
__a = len(target_dict.symbols )
__a = os.path.join(a , "vocab.json" )
if not os.path.isdir(a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(a ) )
return
os.makedirs(a , exist_ok=a )
__a = target_dict.indices
# fairseq has the <pad> and <s> switched
__a = 4_2
__a = 4_3
with open(a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(a , a )
__a = WavaVecaPhonemeCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=a , )
__a = True if config.feat_extract_norm == "layer" else False
__a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
__a = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
__a = UniSpeechForCTC(a )
else:
__a = UniSpeechForPreTraining(a )
if is_finetuned:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__a = model[0].eval()
recursively_load_weights(a , a , a )
hf_unispeech.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
SCREAMING_SNAKE_CASE__:Any = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
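# Worked example of the shortest-edge rule above (illustrative numbers only): for a
# 300 x 400 (w x h) image with shortest_edge=18, the width becomes 18 and the height
# int(18 * 400 / 300) = 24, preserving the aspect ratio.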
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:List[Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class snake_case__ ( snake_case_ ):
_snake_case : int = """mobilenet_v2"""
def __init__( self , lowerCamelCase=3 , lowerCamelCase=224 , lowerCamelCase=1.0 , lowerCamelCase=8 , lowerCamelCase=8 , lowerCamelCase=6 , lowerCamelCase=32 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="relu6" , lowerCamelCase=True , lowerCamelCase=0.8 , lowerCamelCase=0.02 , lowerCamelCase=0.001 , lowerCamelCase=255 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__a = num_channels
__a = image_size
__a = depth_multiplier
__a = depth_divisible_by
__a = min_depth
__a = expand_ratio
__a = output_stride
__a = first_layer_is_expansion
__a = finegrained_output
__a = hidden_act
__a = tf_padding
__a = classifier_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = semantic_loss_ignore_index
class snake_case__ ( snake_case_ ):
_snake_case : Union[str, Any] = version.parse("""1.11""" )
@property
def a__ ( self ):
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def a__ ( self ):
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def a__ ( self ):
return 1E-4
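# The depth_multiplier / depth_divisible_by / min_depth fields above control how channel
# counts are scaled and rounded. A minimal sketch of the standard MobileNet rounding rule
# (an assumption about intent, not code from this file):
def make_divisible(value, divisor=8, min_value=None):
    # Round `value` to the nearest multiple of `divisor`, never below `min_value`.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure rounding does not shrink the channel count by more than 10%.
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value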
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
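# Minimal usage sketch for the processor above (assumes Pillow is installed;
# `snake_case__` is this file's obfuscated class name, and the shape follows from the
# defaults above):
# from PIL import Image
# processor = snake_case__()
# batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
# batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop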
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a ):
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
__a = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
__a = 1
if upper_limit > 0:
__a = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
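# Example values from the recurrence above: catalan_numbers(5) returns
# [1, 1, 2, 5, 14, 42], i.e. C(0) through C(5).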
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
SCREAMING_SNAKE_CASE__:str = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
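# Sketch of what the conversation helper above produces (illustrative, no real ids):
# each turn is encoded without special tokens and terminated with eos_token_id, so two
# turns become encode(t0) + [eos] + encode(t1) + [eos], kept to the last
# model_max_length tokens.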
| 67 | 1 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = BioGptModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = BioGptForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase ):
__a = BioGptModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# create attention mask
__a = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase )
__a = self.seq_length // 2
__a = 0
# first forward pass
__a , __a = model(lowerCamelCase , attention_mask=lowerCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__a = ids_tensor((1,) , lowerCamelCase ).item() + 1
__a = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__a = random_other_next_tokens
# append to next input_ids and attn_mask
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase )] , dim=1 , )
# get two different outputs
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase , attention_mask=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase ):
__a = BioGptModel(config=lowerCamelCase ).to(lowerCamelCase ).eval()
__a = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase )
# first forward pass
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase )
__a , __a = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase )[
"last_hidden_state"
]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
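# The equivalence checked above mirrors incremental decoding in practice (a sketch, not
# part of the test): instead of re-running the whole sequence, feed only the new tokens
# together with the cached states, e.g.
# model(next_tokens, attention_mask=full_mask, past_key_values=past)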
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , lowerCamelCase=False ):
__a = BioGptForCausalLM(lowerCamelCase )
model.to(lowerCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def a__ ( self , lowerCamelCase , *lowerCamelCase ):
__a = BioGptModel(lowerCamelCase )
__a = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase ):
__a = self.num_labels
__a = BioGptForTokenClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
(__a, __a, __a, __a, __a, __a, __a) = config_and_inputs
__a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : List[str] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_snake_case : Union[str, Any] = (BioGptForCausalLM,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case : List[str] = False
def a__ ( self ):
__a = BioGptModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase , gradient_checkpointing=lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase )
@slow
def a__ ( self ):
__a = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowerCamelCase )
__a = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
__a = "left"
# Define PAD Token = EOS Token = 50256
__a = tokenizer.eos_token
__a = model.config.eos_token_id
# use different length sentences to test batching
__a = [
"Hello, my dog is a little",
"Today, I",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase )
__a = inputs["input_ids"].to(lowerCamelCase )
__a = model.generate(
input_ids=lowerCamelCase , attention_mask=inputs["attention_mask"].to(lowerCamelCase ) , )
__a = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(lowerCamelCase )
__a = model.generate(input_ids=lowerCamelCase )
__a = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
__a = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(lowerCamelCase )
__a = model.generate(input_ids=lowerCamelCase , max_length=model.config.max_length - num_paddings )
__a = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
__a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase )
__a = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase )
__a = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertListEqual(lowerCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def a__ ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = BioGptModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = input_dict["input_ids"]
__a = input_ids.ne(1 ).to(lowerCamelCase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = BioGptForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = "multi_label_classification"
__a = input_dict["input_ids"]
__a = input_ids.ne(1 ).to(lowerCamelCase )
__a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a = BioGptForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
__a = torch.tensor([[2, 4805, 9, 656, 21]] )
__a = model(lowerCamelCase )[0]
__a = 42384
__a = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
__a = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
__a = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowerCamelCase )
torch.manual_seed(0 )
__a = tokenizer("COVID-19 is" , return_tensors="pt" ).to(lowerCamelCase )
__a = model.generate(
**lowerCamelCase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowerCamelCase , )
__a = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase )
__a = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a = 4_0_0_0_0_0_0 ):
__a = []
__a , __a = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(a )
__a , __a = b, a + b
return sum(a )
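# Example (well-known values): the even Fibonacci terms not exceeding 100 are 2, 8 and 34,
# so solution(100) returns 44; with the default limit of 4_000_000 the sum is 4613732.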
if __name__ == "__main__":
print(F'''{solution() = }''')
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
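# Usage sketch (illustrative): find_max([3, 1, 4, 1, 5], 0, 4) returns 5. Negative
# in-range indices also work, e.g. find_max([3, 1, 4], -3, -1) returns 4.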
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 67 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | 1 |
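get_expected_values in the DETA tester encodes the standard shortest-edge resize rule: scale the image so its shorter side equals size["shortest_edge"] while preserving aspect ratio (the longest_edge cap is deliberately kept out of play, per the tester's own comment). That arithmetic in isolation, with a made-up function name:

def shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    # scale so the shorter side equals `shortest_edge`; int() truncation matches the helper above
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(400, 200) == (36, 18)
assert shortest_edge_resize(200, 400) == (18, 36)
assert shortest_edge_resize(300, 300) == (18, 18)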
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
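The last method of the BLOOM tokenizer above builds conversational inputs by concatenating each turn's ids plus an EOS id, then keeping only the final model_max_length tokens. The same logic in isolation, with a stub encoder standing in for the tokenizer (all names here are illustrative, not the transformers API):

from typing import Callable, List

def build_conversation_ids(turns: List[str], encode: Callable[[str], List[int]],
                           eos_token_id: int, model_max_length: int) -> List[int]:
    input_ids: List[int] = []
    for text in turns:
        input_ids.extend(encode(text) + [eos_token_id])
    # if the history overflows the context window, keep only the most recent tokens
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    return input_ids

# toy encoder: one id per character; with a window of 5 only the tail survives
ids = build_conversation_ids(["hi", "there"], lambda s: [ord(c) for c in s],
                             eos_token_id=0, model_max_length=5)
assert ids == [ord("h"), ord("e"), ord("r"), ord("e"), 0]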
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__:Dict = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Tuple = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
| 67 | 1 |
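The per-pixel math in get_camera_rays above reduces to: map the pixel index into [-1, 1], scale by tan(fov / 2), and take direction = z + fx * x + fy * y, normalized. A numpy sketch of that core for one pixel, under the same pinhole-camera assumptions (the function name is mine):

import numpy as np

def pixel_ray_direction(px, py, width, height, x_fov, y_fov, x_axis, y_axis, z_axis):
    # normalized device coordinates in [-1, 1], matching (flat / (res - 1)) * 2 - 1 above
    fx = (px / (width - 1)) * 2 - 1
    fy = (py / (height - 1)) * 2 - 1
    # the frustum edges map to +/- tan(fov / 2)
    fx *= np.tan(x_fov / 2)
    fy *= np.tan(y_fov / 2)
    direction = z_axis + fx * x_axis + fy * y_axis
    return direction / np.linalg.norm(direction)

# the exact center of a 64x64 view (fractional index just to hit it) looks straight down z
d = pixel_ray_direction(31.5, 31.5, 64, 64, 0.7, 0.7,
                        np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), np.array([0.0, 0.0, 1.0]))
assert np.allclose(d, [0.0, 0.0, 1.0])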
"""simple docstring"""
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE__:List[Any] = """examples/"""
SCREAMING_SNAKE_CASE__:Dict = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
SCREAMING_SNAKE_CASE__:int = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
SCREAMING_SNAKE_CASE__:List[str] = """README.md"""
def _lowerCamelCase( a , a , a ):
with open(a , "r" , encoding="utf-8" , newline="\n" ) as f:
__a = f.read()
__a , __a = REPLACE_PATTERNS[pattern]
__a = replace.replace("VERSION" , a )
__a = re_pattern.sub(a , a )
with open(a , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(a )
def _lowerCamelCase( a ):
for folder, directories, fnames in os.walk(a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(a , a ) , a , pattern="examples" )
def _lowerCamelCase( a , a=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a , a , a )
if not patch:
update_version_in_examples(a )
def _lowerCamelCase( ):
__a = "🤗 Transformers currently provides the following architectures"
__a = "1. Want to contribute a new model?"
with open(a , "r" , encoding="utf-8" , newline="\n" ) as f:
__a = f.readlines()
# Find the start of the list.
__a = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__a = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
__a = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(a , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(a )
def _lowerCamelCase( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
__a = f.read()
__a = REPLACE_PATTERNS["init"][0].search(a ).groups()[0]
return packaging.version.parse(a )
def _lowerCamelCase( a=False ):
__a = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
__a = default_version.base_version
elif patch:
__a = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
__a = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
__a = input(F"Which version are you releasing? [{default_version}]" )
if len(a ) == 0:
__a = default_version
print(F"Updating version to {version}." )
global_version_update(a , patch=a )
def _lowerCamelCase( ):
__a = get_version()
__a = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
__a = current_version.base_version
# Check with the user we got that right.
__a = input(F"Which version are we developing now? [{dev_version}]" )
if len(a ) == 0:
__a = dev_version
print(F"Updating version to {version}." )
global_version_update(a )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
SCREAMING_SNAKE_CASE__:Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
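Everything this release script does hinges on the REPLACE_PATTERNS table: each entry pairs a compiled regex with a replacement template whose VERSION placeholder gets filled in. A minimal sketch of one such rewrite on an in-memory string, reusing the "init" pattern shape from above (the function name is mine):

import re

INIT_PATTERN = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
INIT_TEMPLATE = '__version__ = "VERSION"\n'

def bump_version(source: str, new_version: str) -> str:
    # fill the placeholder, then substitute the whole matched line
    return INIT_PATTERN.sub(INIT_TEMPLATE.replace("VERSION", new_version), source)

out = bump_version('name = "diffusers"\n__version__ = "0.17.0.dev0"\n', "0.17.0")
assert '__version__ = "0.17.0"' in out and "dev0" not in out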
| 67 | """simple docstring"""
def _lowerCamelCase( a ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def _lowerCamelCase( a ):
__a = 0
__a = number
while duplicate > 0:
__a , __a = divmod(a , 1_0 )
fact_sum += factorial(a )
return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
SCREAMING_SNAKE_CASE__:Optional[Any] = int(input("""Enter number: """).strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
| 67 | 1 |
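De-obfuscated, the second snippet in this row tests for Krishnamurthy numbers: integers equal to the sum of the factorials of their digits, the classic example being 145 = 1! + 4! + 5! = 1 + 24 + 120. The same check compresses to one line with math.factorial in place of the hand-rolled recursion:

from math import factorial

def is_krishnamurthy(number: int) -> bool:
    # e.g. 145 == factorial(1) + factorial(4) + factorial(5)
    return number == sum(factorial(int(d)) for d in str(number))

assert [n for n in range(1, 200) if is_krishnamurthy(n)] == [1, 2, 145]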
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
SCREAMING_SNAKE_CASE__:Dict = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def _lowerCamelCase( a ):
# Check that each of the custom files made it into the package
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
SCREAMING_SNAKE_CASE__:Optional[int] = parser.parse_args()
if args.check_lib:
SCREAMING_SNAKE_CASE__:int = importlib.import_module("""transformers""")
SCREAMING_SNAKE_CASE__:int = Path(transformers_module.__file__).parent
else:
SCREAMING_SNAKE_CASE__:List[str] = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | 1 |
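The GPTBigCode __init__ above (like the XLM one earlier in this section) follows the transformers lazy-import scheme: declare _import_structure, import eagerly only under TYPE_CHECKING, and otherwise replace the module with a proxy that resolves attributes on first access. Stripped of the library machinery, the bare mechanism is PEP 562's module-level __getattr__; a minimal sketch with made-up module and symbol names:

# hypothetical package __init__.py demonstrating lazy attribute resolution (PEP 562)
import importlib

_import_structure = {
    "configuration_foo": ["FooConfig"],
    "modeling_foo": ["FooModel"],
}
# invert to: symbol name -> submodule that defines it
_symbol_to_module = {sym: mod for mod, syms in _import_structure.items() for sym in syms}

def __getattr__(name):
    # called only when `name` is not found normally; imports the submodule on demand
    if name in _symbol_to_module:
        module = importlib.import_module(f".{_symbol_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")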
"""simple docstring"""
def _lowerCamelCase( a ):
__a = [0] * len(a )
for i in range(1 , len(a ) ):
# use last results for better performance - dynamic programming
__a = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
__a = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
__a = j
return prefix_result
def _lowerCamelCase( a ):
return max(prefix_function(a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | """simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _lowerCamelCase( a , a , a ):
__a = OmegaConf.load(a )
__a = torch.load(a , map_location="cpu" )["model"]
__a = list(state_dict.keys() )
# extract state_dict for VQVAE
__a = {}
__a = "first_stage_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
# extract state_dict for UNetLDM
__a = {}
__a = "model.diffusion_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
__a = config.model.params.first_stage_config.params
__a = config.model.params.unet_config.params
__a = VQModel(**a ).eval()
vqvae.load_state_dict(a )
__a = UNetLDMModel(**a ).eval()
unet.load_state_dict(a )
__a = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=a , )
__a = LDMPipeline(a , a , a )
pipeline.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 67 | 1 |
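The first snippet in this row is the KMP failure table: prefix_result[i] is the length of the longest proper prefix of s[:i + 1] that is also its suffix, and the final max() yields the longest such border over all prefixes. The same algorithm with readable names, plus a worked example:

def prefix_function(s: str) -> list:
    # classic Knuth-Morris-Pratt table, O(len(s))
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

# "aab" is the longest string that is both a proper prefix and a suffix of "aabaaab"
assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert max(prefix_function("aabaaab")) == 3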
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__:Dict = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:int = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the numbers of encoder and decoder layers are both present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
| 67 | 1 |
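All the dummy-input generators in the ONNX config above build past_key_values the same way: per layer, a (key, value) pair of zero tensors shaped (batch, num_heads, past_sequence_length, hidden_size // num_heads). That construction on its own, as a simplified sketch of the causal-lm branch (the function name and dimensions are made up):

import torch

def dummy_past_key_values(batch, num_heads, past_len, hidden_size, num_layers):
    # one (key, value) pair of zeros per decoder layer
    head_dim = hidden_size // num_heads
    shape = (batch, num_heads, past_len, head_dim)
    return [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]

past = dummy_past_key_values(batch=2, num_heads=16, past_len=10, hidden_size=512, num_layers=8)
assert len(past) == 8 and past[0][0].shape == (2, 16, 10, 32)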
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
SCREAMING_SNAKE_CASE__:Any = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """hopper-medium-v2"""
SCREAMING_SNAKE_CASE__:Optional[int] = gym.make(env_name)
SCREAMING_SNAKE_CASE__:str = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
SCREAMING_SNAKE_CASE__:Any = env.reset()
SCREAMING_SNAKE_CASE__:List[Any] = 0
SCREAMING_SNAKE_CASE__:Any = 0
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1000
SCREAMING_SNAKE_CASE__:Optional[Any] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
SCREAMING_SNAKE_CASE__:Optional[Any] = pipeline(obs, planning_horizon=32)
# execute action in environment
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__:Optional[int] = env.step(denorm_actions)
SCREAMING_SNAKE_CASE__:Union[str, Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
SCREAMING_SNAKE_CASE__:Any = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad tokens in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and the rest are between 2..seq_length,
# where the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing whether the past input ids had
# pad tokens in them, which results in an incorrect seq_length and, in turn, in
# position_ids being off by num_pad_tokens in the past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the new token to input_ids
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCamelCase( a ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
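# Illustrative checks: 37 -> True (no divisor among the 6k +/- 1 candidates 5 and 7 up to sqrt(37));
# 35 -> False (divisible by 5). Only candidates of the form 6k +/- 1 need testing.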
def _lowerCamelCase( a ):
__a = str(a )
__a = [n]
for i in range(1 , len(a ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def _lowerCamelCase( a ):
if len(str(a ) ) > 3:
if not is_prime(int(str(a )[-3:] ) ) or not is_prime(int(str(a )[:3] ) ):
return False
return True
def _lowerCamelCase( a = 1_1 ):
__a = []
__a = 1_3
while len(a ) != count:
if validate(a ):
__a = list_truncated_nums(a )
if all(is_prime(a ) for i in list_nums ):
list_truncated_primes.append(a )
num += 2
return list_truncated_primes
def _lowerCamelCase( ):
return sum(compute_truncated_primes(1_1 ) )
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a ):
# Initialise PyTorch model
__a = MobileBertConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
__a = MobileBertForPreTraining(a )
# Load weights from tf checkpoint
__a = load_tf_weights_in_mobilebert(a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 67 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _lowerCamelCase( a , a , a , a ):
__a = s.rsplit(a , a )
return new.join(a )
def _lowerCamelCase( a ):
# encoder.embeddings are copied twice in the original FLAVA checkpoint, so skip them here
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
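# n.b. despite the call-site name (count_parameters), this returns a checksum over parameter
# values rather than a parameter count; it is only used to compare the two state dicts below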
def _lowerCamelCase( a ):
__a = {}
__a = ["group_1", "group_2", "group_3", "group_4"]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__a = key.replace(F"{group_key}." , F"{group_key}.group." )
if "res_path" in key:
__a = key.replace("res_path." , "res_path.path." )
if key.endswith(".w" ):
__a = rreplace(a , ".w" , ".weight" , 1 )
if key.endswith(".b" ):
__a = rreplace(a , ".b" , ".bias" , 1 )
__a = value.float()
return upgrade
@torch.no_grad()
def _lowerCamelCase( a , a , a=None , a=True ):
from dall_e import Encoder
__a = Encoder()
if os.path.exists(a ):
__a = torch.load(a )
else:
__a = torch.hub.load_state_dict_from_url(a )
if isinstance(a , a ):
__a = ckpt.state_dict()
encoder.load_state_dict(a )
if config_path is not None:
__a = FlavaImageCodebookConfig.from_pretrained(a )
else:
__a = FlavaImageCodebookConfig()
__a = FlavaImageCodebook(a ).eval()
__a = encoder.state_dict()
__a = upgrade_state_dict(a )
hf_model.load_state_dict(a )
__a = hf_model.state_dict()
__a = count_parameters(a )
__a = count_parameters(a )
assert torch.allclose(a , a , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(a )
else:
return hf_state_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Any = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
SCREAMING_SNAKE_CASE__:str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
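# i.e. (roughly) flag any open(...) call on a line that mentions neither an encoding nor a binary/write mode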
__a = input_file.read()
__a = regexp.search(lowerCamelCase )
return match
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__a = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a = regexp.finditer(lowerCamelCase )
__a = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = DanceDiffusionPipeline
_snake_case : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_snake_case : List[Any] = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
_snake_case : List[str] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_snake_case : Any = False
_snake_case : List[Any] = False
def a__ ( self ):
torch.manual_seed(0 )
__a = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCamelCase , use_timestep_embedding=lowerCamelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
__a = IPNDMScheduler()
__a = {
"unet": unet,
"scheduler": scheduler,
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = DanceDiffusionPipeline(**lowerCamelCase )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
__a = pipe(**lowerCamelCase )
__a = output.audios
__a = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__a = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a__ ( self ):
return super().test_save_load_local()
@skip_mps
def a__ ( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def a__ ( self ):
return super().test_save_load_optional_components()
@skip_mps
def a__ ( self ):
return super().test_attention_slicing_forward_pass()
def a__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = torch_device
__a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = torch.manual_seed(0 )
__a = pipe(generator=lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
__a = output.audios
__a = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__a = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self ):
__a = torch_device
__a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = torch.manual_seed(0 )
__a = pipe(generator=lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
__a = output.audios
__a = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__a = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | 1 |
"""simple docstring"""
SCREAMING_SNAKE_CASE__:str = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class snake_case__ :
def __init__( self ):
__a = []
__a = set()
def a__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def a__ ( self ):
return len(self.elements ) == 0
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
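# the update path rebuilds the heap by popping until the item is found, then re-pushing
# everything with the new priority; O(n log n), but fine for the small open lists used here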
def a__ ( self , lowerCamelCase ):
if item in self.set:
self.set.remove(lowerCamelCase )
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def a__ ( self ):
return self.elements[0][1]
def a__ ( self ):
((__a) , (__a)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
def _lowerCamelCase( a , a ):
# Euclidean distance
__a = np.array(a )
__a = np.array(a )
return np.linalg.norm(a - b )
def _lowerCamelCase( a , a ):
# integer division by time variable
return consistent_heuristic(a , a ) // t
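# yields a second, possibly inconsistent heuristic by shrinking the consistent estimate as t grows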
def _lowerCamelCase( a , a ):
# Manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
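# e.g. p = (2, 3), goal = (5, 7): |2 - 5| + |3 - 7| = 7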
def _lowerCamelCase( a , a , a , a ):
__a = g_function[start] + Wa * heuristics[i](a , a )
return ans
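# weighted-A* priority: f_i(s) = g(s) + Wa * h_i(s, goal)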
def _lowerCamelCase( a , a , a ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def _lowerCamelCase( a ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase( a , a , a , a , a , a , a , a , ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def _lowerCamelCase( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
SCREAMING_SNAKE_CASE__:Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
SCREAMING_SNAKE_CASE__:str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
SCREAMING_SNAKE_CASE__:int = make_common_ground()
SCREAMING_SNAKE_CASE__:List[str] = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE__:str = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 20
SCREAMING_SNAKE_CASE__:Dict = 3 # one consistent and two other inconsistent
# start and end destination
SCREAMING_SNAKE_CASE__:Dict = (0, 0)
SCREAMING_SNAKE_CASE__:Optional[Any] = (n - 1, n - 1)
SCREAMING_SNAKE_CASE__:List[str] = 1
def _lowerCamelCase( a , a , a ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 1 |
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
SCREAMING_SNAKE_CASE__:Any = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
SCREAMING_SNAKE_CASE__:Any = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
SCREAMING_SNAKE_CASE__:str = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:int = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
SCREAMING_SNAKE_CASE__:Dict = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:List[Any] = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
SCREAMING_SNAKE_CASE__:Tuple = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:Tuple = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
SCREAMING_SNAKE_CASE__:List[Any] = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
SCREAMING_SNAKE_CASE__:List[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:Dict = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
SCREAMING_SNAKE_CASE__:int = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
SCREAMING_SNAKE_CASE__:List[Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
SCREAMING_SNAKE_CASE__:Dict = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:Union[str, Any] = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
SCREAMING_SNAKE_CASE__:Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
SCREAMING_SNAKE_CASE__:str = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
SCREAMING_SNAKE_CASE__:Optional[int] = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:Tuple = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__:List[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
SCREAMING_SNAKE_CASE__:Tuple = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__:Tuple = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:Optional[int] = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
SCREAMING_SNAKE_CASE__:List[str] = """"""
SCREAMING_SNAKE_CASE__:Tuple = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
SCREAMING_SNAKE_CASE__:Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
SCREAMING_SNAKE_CASE__:Optional[int] = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _lowerCamelCase( a , a ):
assert ReadMe.from_string(a , a ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _lowerCamelCase( a , a ):
with pytest.raises(a , match=re.escape(expected_error.format(path="root" ) ) ):
__a = ReadMe.from_string(a , a )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase( a , a ):
with pytest.raises(a , match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(a , a )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase( a ):
ReadMe.from_string(a , a , suppress_parsing_errors=a )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _lowerCamelCase( a , a ):
with tempfile.TemporaryDirectory() as tmp_dir:
__a = Path(a ) / "README.md"
with open(a , "w+" ) as readme_file:
readme_file.write(a )
__a = ReadMe.from_readme(a , a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _lowerCamelCase( a , a ):
with tempfile.TemporaryDirectory() as tmp_dir:
__a = Path(a ) / "README.md"
with open(a , "w+" ) as readme_file:
readme_file.write(a )
__a = expected_error.format(path=a )
with pytest.raises(a , match=re.escape(a ) ):
__a = ReadMe.from_readme(a , a )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase( a , a ):
with tempfile.TemporaryDirectory() as tmp_dir:
__a = Path(a ) / "README.md"
with open(a , "w+" ) as readme_file:
readme_file.write(a )
__a = expected_error.format(path=a )
with pytest.raises(a , match=re.escape(a ) ):
ReadMe.from_readme(a , a )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _lowerCamelCase( a ):
with tempfile.TemporaryDirectory() as tmp_dir:
__a = Path(a ) / "README.md"
with open(a , "w+" ) as readme_file:
readme_file.write(a )
ReadMe.from_readme(a , a , suppress_parsing_errors=a )
| 67 | """simple docstring"""
SCREAMING_SNAKE_CASE__:Any = """Alexander Joslin"""
import operator as op
from .stack import Stack
def _lowerCamelCase( a ):
__a = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
__a = Stack()
__a = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a ) )
elif i in operators:
# RULE 2
operator_stack.push(a )
elif i == ")":
# RULE 4
__a = operator_stack.peek()
operator_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operand_stack.peek()
operand_stack.pop()
__a = operators[opr](a , a )
operand_stack.push(a )
# RULE 5
return operand_stack.peek()
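# Worked example for "(5+(3*2))": the inner ")" combines 3 and 2 into 6,
# the outer ")" combines 5 and 6 into 11, which is left on the operand stack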
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 67 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class snake_case__ ( snake_case_, snake_case_ ):
@register_to_config
def __init__( self , lowerCamelCase = 128 , lowerCamelCase = 256 , lowerCamelCase = 2000.0 , lowerCamelCase = 768 , lowerCamelCase = 12 , lowerCamelCase = 12 , lowerCamelCase = 64 , lowerCamelCase = 2048 , lowerCamelCase = 0.1 , ):
super().__init__()
__a = nn.Sequential(
nn.Linear(lowerCamelCase , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , )
__a = nn.Embedding(lowerCamelCase , lowerCamelCase )
__a = False
__a = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__a = nn.Dropout(p=lowerCamelCase )
__a = nn.ModuleList()
for lyr_num in range(lowerCamelCase ):
# FiLM conditional T5 decoder
__a = DecoderLayer(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase )
self.decoders.append(lowerCamelCase )
__a = TaLayerNorm(lowerCamelCase )
__a = nn.Dropout(p=lowerCamelCase )
__a = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
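# mask[b, 1, q, k] = query_input[b, q] * key_input[b, k]; the singleton dim broadcasts over attention heads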
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a , __a , __a = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__a = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__a = self.conditioning_emb(lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__a = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__a = torch.broadcast_to(
torch.arange(lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__a = self.position_encoding(lowerCamelCase )
__a = self.continuous_inputs_projection(lowerCamelCase )
inputs += position_encodings
__a = self.dropout(lowerCamelCase )
# decoder: No padding present.
__a = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__a = [(x, self.encoder_decoder_mask(lowerCamelCase , lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__a = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__a = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__a = lyr(
lowerCamelCase , conditioning_emb=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )[0]
__a = self.decoder_norm(lowerCamelCase )
__a = self.post_dropout(lowerCamelCase )
__a = self.spec_out(lowerCamelCase )
return spec_out
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1E-6 ):
super().__init__()
__a = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase , ) )
# FiLM-conditioned MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase ) )
def a__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
__a = self.layer[0](
lowerCamelCase , conditioning_emb=lowerCamelCase , attention_mask=lowerCamelCase , )
if encoder_hidden_states is not None:
__a = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
__a = self.layer[1](
lowerCamelCase , key_value_states=lowerCamelCase , attention_mask=lowerCamelCase , )
# Apply the FiLM conditional feed-forward layer
__a = self.layer[-1](lowerCamelCase , lowerCamelCase )
return (hidden_states,)
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
super().__init__()
__a = TaLayerNorm(lowerCamelCase )
__a = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase )
__a = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase )
__a = nn.Dropout(lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ):
# pre_self_attention_layer_norm
__a = self.layer_norm(lowerCamelCase )
if conditioning_emb is not None:
__a = self.FiLMLayer(lowerCamelCase , lowerCamelCase )
# Self-attention block
__a = self.attention(lowerCamelCase )
__a = hidden_states + self.dropout(lowerCamelCase )
return hidden_states
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
super().__init__()
__a = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase )
__a = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase )
__a = nn.Dropout(lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ):
__a = self.layer_norm(lowerCamelCase )
__a = self.attention(
lowerCamelCase , encoder_hidden_states=lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
__a = hidden_states + self.dropout(lowerCamelCase )
return layer_output
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
super().__init__()
__a = TaDenseGatedActDense(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase )
__a = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase )
__a = TaLayerNorm(lowerCamelCase , eps=lowerCamelCase )
__a = nn.Dropout(lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase=None ):
__a = self.layer_norm(lowerCamelCase )
if conditioning_emb is not None:
__a = self.film(lowerCamelCase , lowerCamelCase )
__a = self.DenseReluDense(lowerCamelCase )
__a = hidden_states + self.dropout(lowerCamelCase )
return hidden_states
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
super().__init__()
__a = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__a = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__a = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__a = nn.Dropout(lowerCamelCase )
__a = NewGELUActivation()
def a__ ( self , lowerCamelCase ):
__a = self.act(self.wi_a(lowerCamelCase ) )
__a = self.wi_a(lowerCamelCase )
__a = hidden_gelu * hidden_linear
__a = self.dropout(lowerCamelCase )
__a = self.wo(lowerCamelCase )
return hidden_states
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase=1E-6 ):
super().__init__()
__a = nn.Parameter(torch.ones(lowerCamelCase ) )
__a = eps
def a__ ( self , lowerCamelCase ):
# T5 uses a layer_norm which only scales and doesn't shift, also known as Root Mean
# Square Layer Normalization (https://arxiv.org/abs/1910.07467), so the variance is
# computed without subtracting the mean and there is no bias. Additionally, we make
# sure the accumulation for half-precision inputs is done in fp32.
__a = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCamelCase )
__a = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
__a = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class snake_case__ ( nn.Module ):
def a__ ( self , lowerCamelCase ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(lowerCamelCase , 3.0 )) ))
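# the tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))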
class snake_case__ ( nn.Module ):
def __init__( self , lowerCamelCase , lowerCamelCase ):
super().__init__()
__a = nn.Linear(lowerCamelCase , out_features * 2 , bias=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = self.scale_bias(lowerCamelCase )
__a , __a = torch.chunk(lowerCamelCase , 2 , -1 )
__a = x * (1 + scale) + shift
return x
| 67 | """simple docstring"""
from math import pi
def _lowerCamelCase( a , a ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 67 | 1 |
"""simple docstring"""
import argparse
import os
import re
SCREAMING_SNAKE_CASE__:List[str] = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE__:Dict = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE__:Dict = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def _lowerCamelCase( a , a = False ):
with open(a , "r" , encoding="utf-8" ) as f:
__a = f.read()
__a = content.split("\n" )
__a = []
__a = 0
while line_idx < len(a ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__a = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
__a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__a = sorted(a , key=lambda a : _re_identifier.search(a ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(a , "w" , encoding="utf-8" ) as f:
f.write("\n".join(a ) )
elif "\n".join(a ) != content:
return True
def _lowerCamelCase( a = False ):
__a = [os.path.join(a , a ) for f in os.listdir(a ) if f.endswith(".py" )]
__a = [sort_auto_mapping(a , overwrite=a ) for fname in fnames]
if not overwrite and any(a ):
__a = [f for f, d in zip(a , a ) if d]
raise ValueError(
F"The following files have auto mappings that need sorting: {', '.join(a )}. Run `make style` to fix"
" this." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
SCREAMING_SNAKE_CASE__:List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 67 | """simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Dict = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = IMAGENET_DEFAULT_MEAN , lowerCamelCase = IMAGENET_DEFAULT_STD , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size["shortest_edge"] )
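# e.g. shortest_edge=224 resizes the short side to 256 before the 224 center crop,
# mirroring the standard resize-256 / crop-224 ImageNet evaluation recipe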
__a = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__a = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
lowerCamelCase , size=(size_dict["height"], size_dict["width"]) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(lowerCamelCase , lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(lowerCamelCase , lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
__a = tempfile.mkdtemp()
# fmt: off
__a = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__a = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__a = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__a = {"unk_token": "<unk>"}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase ) )
__a = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__a = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def a__ ( self , **lowerCamelCase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self , **lowerCamelCase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self , **lowerCamelCase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self ):
shutil.rmtree(self.tmpdirname )
def a__ ( self ):
__a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = self.get_image_processor()
__a = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__a = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
__a = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__a = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def a__ ( self ):
__a = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__a = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__a = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = self.prepare_image_inputs()
__a = image_processor(lowerCamelCase , return_tensors="np" )
__a = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = "lower newer"
__a = processor(text=lowerCamelCase )
__a = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = "lower newer"
__a = self.prepare_image_inputs()
__a = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = self.prepare_image_inputs()
__a = self.prepare_image_inputs()
__a = processor(images=lowerCamelCase , visual_prompt=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def a__ ( self ):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(lowerCamelCase )
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
| 67 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=30 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=10 , lowerCamelCase=0.02 , lowerCamelCase=None , lowerCamelCase=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
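# e.g. with the tester defaults (image_size=30, patch_size=2): (30 // 2) ** 2 = 225 patches, so seq_length = 226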
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = ViTForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : List[Any] = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : int = True
_snake_case : int = False
_snake_case : str = False
_snake_case : Optional[Any] = False
def a__ ( self ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
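# Concretely: dino-vits8 uses 8x8 patches, so the 480x480 input below yields (480 // 8) ** 2 = 3600
# patches plus the [CLS] token = 3601 positions, matching the (1, 3601, 384) shape asserted further down.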
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCamelCase )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(lowerCamelCase , interpolate_pos_encoding=lowerCamelCase )
# verify the logits
__a = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase )
__a = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="pt" )
__a = inputs.pixel_values.to(lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
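# fairseq -> Transformers parameter-name mapping; the "*" wildcard stands for the transformer layer index
# and is filled in at load time when a weight name is matched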
SCREAMING_SNAKE_CASE__:List[str] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def _lowerCamelCase( a ):
__a = {}
with open(a , "r" ) as file:
for line_number, line in enumerate(file ):
__a = line.strip()
if line:
__a = line.split()
__a = line_number
__a = words[0]
__a = value
return result
def _lowerCamelCase( a , a , a , a , a ):
for attribute in key.split("." ):
__a = getattr(a , a )
__a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(a ):
__a = PARAM_MAPPING[full_name.split("." )[-1]]
__a = "param"
if weight_type is not None and weight_type != "param":
__a = getattr(a , a ).shape
elif weight_type is not None and weight_type == "param":
__a = hf_pointer
for attribute in hf_param_name.split("." ):
__a = getattr(a , a )
__a = shape_pointer.shape
# let's reduce dimension
__a = value[0]
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
__a = getattr(a , a )
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a , a , a , a ):
__a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(a ):
__a = PARAM_MAPPING[full_name.split("." )[-1]]
__a = "param"
if weight_type is not None and weight_type != "param":
__a = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__a = ".".join([key, hf_param_name] )
else:
__a = key
__a = value if "lm_head" in full_key else value[0]
SCREAMING_SNAKE_CASE__:Dict = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
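# adapter parameter-name mapping: W_a/W_b are the two linear projections, b_a/b_b their biases,
# ln_W/ln_b the layer norm; full names ending in one of these keys are redirected through this table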
def _lowerCamelCase( a , a , a=None , a=None ):
__a = False
for key, mapped_key in MAPPING.items():
__a = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(key )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "bias" in name:
__a = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a = "weight"
else:
__a = None
if hf_dict is not None:
rename_dict(a , a , a , a , a )
else:
set_recursively(a , a , a , a , a )
return is_used
return is_used
def _lowerCamelCase( a , a , a ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
else:
__a = load_wavaveca_layer(a , a , a )
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
def _lowerCamelCase( a , a , a , a , a ):
__a = full_name.split("conv_layers." )[-1]
__a = name.split("." )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a )
@torch.no_grad()
def _lowerCamelCase( a , a , a=None , a=None , a=True , a=False ):
if config_path is not None:
__a = WavaVecaConfig.from_pretrained(a )
else:
__a = WavaVecaConfig()
if is_seq_class:
__a = read_txt_into_dict(a )
__a = idalabel
__a = WavaVecaForSequenceClassification(a )
__a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
feature_extractor.save_pretrained(a )
elif is_finetuned:
if dict_path:
__a = Dictionary.load(a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a = target_dict.pad_index
__a = target_dict.bos_index
__a = target_dict.eos_index
__a = len(target_dict.symbols )
__a = os.path.join(a , "vocab.json" )
if not os.path.isdir(a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(a ) )
return
os.makedirs(a , exist_ok=a )
__a = target_dict.indices
# fairseq has the <pad> and <s> switched
__a = 0
__a = 1
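# i.e. the HF CTC vocab puts <pad> at index 0 and <s> at index 1, the reverse of fairseq's layout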
with open(a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(a , a )
__a = WavaVecaCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=a , )
__a = True if config.feat_extract_norm == "layer" else False
__a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
__a = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
__a = WavaVecaForCTC(a )
else:
__a = WavaVecaForPreTraining(a )
if is_finetuned or is_seq_class:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
__a = argparse.Namespace(task="audio_pretraining" )
__a = fairseq.tasks.setup_task(a )
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a )
__a = model[0].eval()
recursively_load_weights(a , a , not is_finetuned )
hf_wavavec.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
SCREAMING_SNAKE_CASE__:Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__:Optional[int] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 67 | """simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def a__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
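# mirrors the processor's shortest-edge resize: scale so the shorter side equals size["shortest_edge"]
# (e.g. w=30, h=400 with shortest_edge=18 gives width 18 and height int(18 * 400 / 30) = 240)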
if w < h:
__a = int(self.size["shortest_edge"] * h / w )
__a = self.size["shortest_edge"]
elif w > h:
__a = self.size["shortest_edge"]
__a = int(self.size["shortest_edge"] * w / h )
else:
__a = self.size["shortest_edge"]
__a = self.size["shortest_edge"]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
__a = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : List[Any] = DetaImageProcessor if is_vision_available() else None
def a__ ( self ):
__a = DetaImageProcessingTester(self )
@property
def a__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def a__ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ):
# prepare image and target
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"image_id": 39769, "annotations": target}
# encode them
__a = DetaImageProcessor()
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def a__ ( self ):
# prepare image, target and masks_path
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__a = json.loads(f.read() )
__a = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
__a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__a = DetaImageProcessor(format="coco_panoptic" )
__a = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
__a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
__a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
# verify area
__a = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
__a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
__a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
# verify image_id
__a = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
__a = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
__a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
__a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 67 | 1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__a = input_file.read()
__a = regexp.search(lowerCamelCase )
return match
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__a = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a = regexp.finditer(lowerCamelCase )
__a = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__:Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : Optional[int] = SpeechTaTokenizer
_snake_case : Optional[int] = False
_snake_case : List[Any] = True
def a__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = SpeechTaTokenizer(lowerCamelCase )
__a = AddedToken("<mask>" , lstrip=lowerCamelCase , rstrip=lowerCamelCase )
__a = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self , lowerCamelCase ):
__a = "this is a test"
__a = "this is a test"
return input_text, output_text
def a__ ( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=20 , lowerCamelCase=5 ):
__a , __a = self.get_input_output_texts(lowerCamelCase )
__a = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__a = tokenizer.decode(lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
return text, ids
def a__ ( self ):
__a = "<pad>"
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def a__ ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(lowerCamelCase ) , 81 )
def a__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
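# setUp added two tokens (<mask> and <ctc_blank>) on top of the 79-entry SentencePiece vocab, hence the 81 above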
def a__ ( self ):
__a = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__a = tokenizer.vocab_size
__a = len(lowerCamelCase )
self.assertNotEqual(lowerCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__a = ["aaaaa bbbbbb", "cccccccccdddddddd"]
__a = tokenizer.add_tokens(lowerCamelCase )
__a = tokenizer.vocab_size
__a = len(lowerCamelCase )
self.assertNotEqual(lowerCamelCase , 0 )
self.assertEqual(lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , len(lowerCamelCase ) )
self.assertEqual(lowerCamelCase , all_size + len(lowerCamelCase ) )
__a = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowerCamelCase )
self.assertGreaterEqual(len(lowerCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__a = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
__a = tokenizer.add_special_tokens(lowerCamelCase )
__a = tokenizer.vocab_size
__a = len(lowerCamelCase )
self.assertNotEqual(lowerCamelCase , 0 )
self.assertEqual(lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , len(lowerCamelCase ) )
self.assertEqual(lowerCamelCase , all_size_a + len(lowerCamelCase ) )
__a = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowerCamelCase )
self.assertGreaterEqual(len(lowerCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
__a = self.get_tokenizer()
__a = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(lowerCamelCase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
__a = tokenizer.convert_tokens_to_ids(lowerCamelCase )
# fmt: off
self.assertListEqual(lowerCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__a = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def a__ ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
__a = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
__a = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowerCamelCase , )
| 67 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = size if size is not None else {"shortest_edge": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = crop_size if crop_size is not None else {"height": 224, "width": 224}
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a = image_std if image_std is not None else OPENAI_CLIP_STD
__a = do_convert_rgb
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
__a = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 67 | 1 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def _lowerCamelCase( a , a = "cpu" , a = None ):
__a = torch.load(a , map_location=a )
for k, v in tqdm(state_dict.items() ):
if not isinstance(a , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
__a = v.half()
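# .half() casts each tensor to fp16, roughly halving the checkpoint's size on disk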
if save_path is None: # overwrite src_path
__a = src_path
torch.save(a , a )
if __name__ == "__main__":
fire.Fire(convert)
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class snake_case__ ( snake_case_ ):
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = ["""input_ids""", """attention_mask"""]
_snake_case : Dict = GPTaTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
__a = kwargs.pop("add_bos_token" , lowerCamelCase )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
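# the serialized pre-tokenizer may disagree with the requested add_prefix_space; if so, rebuild it with the new value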
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
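# Minimal usage sketch (hedged: "gpt2" is an illustrative checkpoint, and the
# obfuscated class above corresponds to GPT2TokenizerFast in the transformers API):
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)  # only valid with add_prefix_space=True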
| 67 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:List[Any] = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
SCREAMING_SNAKE_CASE__:int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _lowerCamelCase( a ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__a = model_type_to_module_name(a )
__a = importlib.import_module(F".{module_name}" , "transformers.models" )
try:
return getattr(a , a )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(a , "__name__" , a ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__a = importlib.import_module("transformers" )
if hasattr(a , a ):
return getattr(a , a )
return None
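# Usage sketch for the lookup above (hedged: upstream transformers names this
# function image_processor_class_from_name, as the call site further down shows;
# the class name passed here is an illustrative example):
#
#   cls = image_processor_class_from_name("ViTImageProcessor")
#   # returns transformers.ViTImageProcessor, or None if the name is unknown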
def _lowerCamelCase( a , a = None , a = False , a = False , a = None , a = None , a = None , a = False , **a , ):
__a = get_file_from_repo(
a , a , cache_dir=a , force_download=a , resume_download=a , proxies=a , use_auth_token=a , revision=a , local_files_only=a , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(a , encoding="utf-8" ) as reader:
return json.load(a )
class snake_case__ :
def __init__( self ):
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(lowerCamelCase )
def a__ ( cls , lowerCamelCase , **lowerCamelCase ):
__a = kwargs.pop("config" , lowerCamelCase )
__a = kwargs.pop("trust_remote_code" , lowerCamelCase )
__a = True
__a , __a = ImageProcessingMixin.get_image_processor_dict(lowerCamelCase , **lowerCamelCase )
__a = config_dict.get("image_processor_type" , lowerCamelCase )
__a = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
__a = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__a = config_dict.pop("feature_extractor_type" , lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
__a = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
__a = config_dict["auto_map"]["AutoFeatureExtractor"]
__a = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
__a = AutoConfig.from_pretrained(lowerCamelCase , **lowerCamelCase )
# It could be in `config.image_processor_type``
__a = getattr(lowerCamelCase , "image_processor_type" , lowerCamelCase )
if hasattr(lowerCamelCase , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
__a = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
__a = image_processor_class_from_name(lowerCamelCase )
__a = image_processor_auto_map is not None
__a = image_processor_class is not None or type(lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
__a = resolve_trust_remote_code(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if has_remote_code and trust_remote_code:
__a = get_class_from_dynamic_module(
lowerCamelCase , lowerCamelCase , **lowerCamelCase )
__a = kwargs.pop("code_revision" , lowerCamelCase )
if os.path.isdir(lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowerCamelCase , **lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(lowerCamelCase , **lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
__a = IMAGE_PROCESSOR_MAPPING[type(lowerCamelCase )]
return image_processor_class.from_dict(lowerCamelCase , **lowerCamelCase )
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
IMAGE_PROCESSOR_MAPPING.register(lowerCamelCase , lowerCamelCase )
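# Minimal end-to-end sketch (hedged: "google/vit-base-patch16-224" is an illustrative
# checkpoint requiring Hub access; it is not referenced anywhere in this file):
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   # dispatches to ViTImageProcessor via IMAGE_PROCESSOR_MAPPING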
| 67 | """simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def _lowerCamelCase( a , a , a ):
__a = hf_hub_url(repo_id=a , path=a , revision=a )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(a )}"
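# Worked example of the assertion above: quote() percent-encodes the blank in the path, so
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
# yields
#   "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"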
| 67 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE__:Optional[Any] = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE__:int = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
SCREAMING_SNAKE_CASE__:List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
SCREAMING_SNAKE_CASE__:str = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
SCREAMING_SNAKE_CASE__:str = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
SCREAMING_SNAKE_CASE__:List[str] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 67 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
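# Worked example of the divide-and-conquer recursion above (a sketch; upstream names
# this helper find_max, as its error message shows, with arguments (nums, left, right)):
#
#   find_max([1, 5, 3, 9, 2], 0, 4)  # mid = 2: max of [1, 5, 3] vs max of [9, 2] -> 9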
| 67 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : int = CLIPTokenizer
_snake_case : int = CLIPTokenizerFast
_snake_case : Any = True
_snake_case : List[Any] = {}
_snake_case : int = False
def a__ ( self ):
super().setUp()
# fmt: off
__a = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__a = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__a = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
__a = {"unk_token": "<unk>"}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase ) )
def a__ ( self , **lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self , **lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = "lower newer"
__a = "lower newer"
return input_text, output_text
def a__ ( self ):
__a = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a = "lower newer"
__a = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
__a = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__a = tokens + [tokenizer.unk_token]
__a = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
@require_ftfy
def a__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__a = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
__a = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
__a = tokenizer_s.tokenize(lowerCamelCase )
__a = tokenizer_r.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__a = "xa\u0303y" + " " + "x\xe3y"
__a = tokenizer_s.tokenize(lowerCamelCase )
__a = tokenizer_r.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Test that the tokenization is identical on unicode of space type
__a = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__a = tokenizer_s.tokenize(lowerCamelCase )
__a = tokenizer_r.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
# Test that the tokenization is identical on unicode of line break type
__a = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__a = tokenizer_s.tokenize(lowerCamelCase )
__a = tokenizer_r.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
__a = F"{text_of_1_token} {text_of_1_token}"
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
__a = F" {text}"
__a = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase , use_fast=lowerCamelCase , )
__a = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase ) + 1, 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
def a__ ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def a__ ( self ):
super().test_tokenization_python_rust_equals()
def a__ ( self ):
# CLIP always lower cases letters
pass
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Tuple = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """big_bird"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu_new" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=4096 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=True , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=66 , lowerCamelCase="block_sparse" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=64 , lowerCamelCase=3 , lowerCamelCase=None , **lowerCamelCase , ):
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , sep_token_id=lowerCamelCase , **lowerCamelCase , )
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = rescale_embeddings
__a = attention_type
__a = use_bias
__a = block_size
__a = num_random_blocks
__a = classifier_dropout
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task == "multiple-choice":
__a = {0: "batch", 1: "choice", 2: "sequence"}
else:
__a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( ):
__a = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
__a = 6
__a = 1
__a = 1_9_0_1
__a = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
__a = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
__a = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
__a = day - days_per_month[month - 2]
if month > 1_2:
year += 1
__a = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
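# This is Project Euler problem 19 (Sundays falling on the first of the month,
# 1 Jan 1901 through 31 Dec 2000); the loop above should return 171.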
| 67 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[int] = {"""tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__:Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = ["""input_ids""", """attention_mask"""]
_snake_case : Optional[int] = None
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<unk>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase=False , lowerCamelCase=False , **lowerCamelCase , ):
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , add_prefix_space=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space:
__a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) )
__a = add_prefix_space
__a = pre_tok_class(**lowerCamelCase )
__a = add_prefix_space
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
__a = kwargs.get("is_split_into_words" , lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
__a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 67 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class snake_case__ ( snake_case_ ):
_snake_case : Dict = """bert-generation"""
def __init__( self , lowerCamelCase=50358 , lowerCamelCase=1024 , lowerCamelCase=24 , lowerCamelCase=16 , lowerCamelCase=4096 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase="absolute" , lowerCamelCase=True , **lowerCamelCase , ):
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
| 67 | """simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case__ :
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : torch.Tensor # [batch_size x 3]
_snake_case : int
_snake_case : int
_snake_case : float
_snake_case : float
_snake_case : Tuple[int]
def a__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self ):
__a = torch.arange(self.height * self.width )
__a = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def a__ ( self ):
__a , *__a = self.shape
__a = int(np.prod(lowerCamelCase ) )
__a = self.get_image_coords()
__a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__a = self.get_camera_rays(lowerCamelCase )
__a = rays.view(lowerCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self , lowerCamelCase ):
__a , *__a , __a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__a = coords.view(lowerCamelCase , -1 , 2 )
__a = self.resolution()
__a = self.fov()
__a = (flat.float() / (res - 1)) * 2 - 1
__a = fracs * torch.tan(fov / 2 )
__a = fracs.view(lowerCamelCase , -1 , 2 )
__a = (
self.z.view(lowerCamelCase , 1 , 3 )
+ self.x.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__a = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase )
__a = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase , *lowerCamelCase , 2 , 3 )
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase , height=lowerCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowerCamelCase( a ):
__a = []
__a = []
__a = []
__a = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
__a = np.array([np.sin(a ), np.cos(a ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__a = -z * 4
__a = np.array([np.cos(a ), -np.sin(a ), 0.0] )
__a = np.cross(a , a )
origins.append(a )
xs.append(a )
ys.append(a )
zs.append(a )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(a , axis=0 ) ).float() , x=torch.from_numpy(np.stack(a , axis=0 ) ).float() , y=torch.from_numpy(np.stack(a , axis=0 ) ).float() , z=torch.from_numpy(np.stack(a , axis=0 ) ).float() , width=a , height=a , x_fov=0.7 , y_fov=0.7 , shape=(1, len(a )) , )
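# Shape sketch for the camera batch built above (a reading of the code, not an API
# guarantee): the loop yields 20 poses, so the returned object has origin/x/y/z of
# shape (20, 3) and shape == (1, 20). With width == height == size, its camera_rays
# property is a tensor of shape (1, 20 * size * size, 2, 3): one (origin, direction)
# pair per pixel of each pose.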
| 67 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _lowerCamelCase( a ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
def _lowerCamelCase( a ):
# word like '180' or '身高' or '神'
for char in word:
__a = ord(a )
if not _is_chinese_char(a ):
return 0
return 1
def _lowerCamelCase( a ):
__a = set()
for token in tokens:
__a = len(a ) > 1 and is_chinese(a )
if chinese_word:
word_set.add(a )
__a = list(a )
return word_list
def _lowerCamelCase( a , a ):
if not chinese_word_set:
return bert_tokens
__a = max([len(a ) for w in chinese_word_set] )
__a = bert_tokens
__a , __a = 0, len(a )
while start < end:
__a = True
if is_chinese(bert_word[start] ):
__a = min(end - start , a )
for i in range(a , 1 , -1 ):
__a = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__a = "##" + bert_word[j]
__a = start + i
__a = False
break
if single_word:
start += 1
return bert_word
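# Worked example of the subword-marking pass above (upstream calls this helper
# add_sub_symbol, as the call site below shows; the sample word is illustrative):
#
#   add_sub_symbol(["身", "高"], {"身高"})  # -> ["身", "##高"]
#
# i.e. continuation characters of a whole LTP word are rewritten with the "##"
# prefix so whole-word masking can group them later.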
def _lowerCamelCase( a , a , a ):
__a = []
for i in range(0 , len(a ) , 1_0_0 ):
__a = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
__a = [get_chinese_word(a ) for r in res]
ltp_res.extend(a )
assert len(a ) == len(a )
__a = []
for i in range(0 , len(a ) , 1_0_0 ):
__a = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=a , truncation=a , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(a ) == len(a )
__a = []
for input_ids, chinese_word in zip(a , a ):
__a = []
for id in input_ids:
__a = bert_tokenizer._convert_id_to_token(a )
input_tokens.append(a )
__a = add_sub_symbol(a , a )
__a = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(a ):
if token[:2] == "##":
__a = token[2:]
# save chinese tokens' pos
if len(a ) == 1 and _is_chinese_char(ord(a ) ):
ref_id.append(a )
ref_ids.append(a )
assert len(a ) == len(a )
return ref_ids
def _lowerCamelCase( a ):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , "r" , encoding="utf-8" ) as f:
__a = f.readlines()
__a = [line.strip() for line in data if len(a ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__a = LTP(args.ltp ) # faster in GPU device
__a = BertTokenizer.from_pretrained(args.bert )
__a = prepare_ref(a , a , a )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
__a = [json.dumps(a ) + "\n" for ref in ref_ids]
f.writelines(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Dict = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
SCREAMING_SNAKE_CASE__:Dict = parser.parse_args()
main(args)
| 67 | """simple docstring"""
def _lowerCamelCase( a ):
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def _lowerCamelCase( a ):
__a = 0
__a = number
while duplicate > 0:
__a , __a = divmod(a , 1_0 )
fact_sum += factorial(a )
return fact_sum == number
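# Worked example: 145 is a Krishnamurthy (strong) number because
#   1! + 4! + 5! = 1 + 24 + 120 = 145
# while e.g. 10 is not, since 1! + 0! = 1 + 1 = 2 != 10.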
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
SCREAMING_SNAKE_CASE__:Optional[Any] = int(input("""Enter number: """).strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a = 5_0 ):
__a = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
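# Reading of the recurrence above (a sketch): different_colour_ways_number[n][t - 2]
# counts the ways to place at least one tile of fixed length t (t = 2, 3, 4, one per
# colour) into a row of length n. For each start position, the tile is either the
# only placement (+1) or is followed by any valid filling of the remaining
# n - start - t cells with tiles of the same length.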
if __name__ == "__main__":
print(F'''{solution() = }''')
| 67 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:List[Any] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class snake_case__ ( snake_case_ ):
_snake_case : Any = """data2vec-vision"""
def __init__( self , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1E-12 , lowerCamelCase=224 , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=True , lowerCamelCase=[3, 5, 7, 11] , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = use_mask_token
__a = use_absolute_position_embeddings
__a = use_relative_position_bias
__a = use_shared_relative_position_bias
__a = layer_scale_init_value
__a = drop_path_rate
__a = use_mean_pooling
# decode head attributes (semantic segmentation)
__a = out_indices
__a = pool_scales
# auxiliary head attributes (semantic segmentation)
__a = use_auxiliary_head
__a = auxiliary_loss_weight
__a = auxiliary_channels
__a = auxiliary_num_convs
__a = auxiliary_concat_input
__a = semantic_loss_ignore_index
class snake_case__ ( snake_case_ ):
_snake_case : Optional[Any] = version.parse("""1.11""" )
@property
def a__ ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def a__ ( self ):
return 1E-4
| 67 | """simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _lowerCamelCase( a , a , a ):
__a = OmegaConf.load(a )
__a = torch.load(a , map_location="cpu" )["model"]
__a = list(state_dict.keys() )
# extract state_dict for VQVAE
__a = {}
__a = "first_stage_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
# extract state_dict for UNetLDM
__a = {}
__a = "model.diffusion_model."
for key in keys:
if key.startswith(a ):
__a = state_dict[key]
__a = config.model.params.first_stage_config.params
__a = config.model.params.unet_config.params
__a = VQModel(**a ).eval()
vqvae.load_state_dict(a )
__a = UNetLDMModel(**a ).eval()
unet.load_state_dict(a )
__a = DDIMScheduler(
num_train_timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=a , )
__a = LDMPipeline(a , a , a )
pipeline.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
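# Illustrative invocation, matching the argparse flags above (a sketch; the
# script and file names are hypothetical):
#
#   python conversion_ldm.py --checkpoint_path model.ckpt --config_path ldm_config.yaml --output_path ./ldm_pipeline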
| 67 | 1 |
"""simple docstring"""
def _lowerCamelCase( a ):
__a = len(a )
while cur > 1:
# Find the maximum number in arr
__a = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__a = arr[mi::-1] + arr[mi + 1 : len(a )]
# Reverse whole list
__a = arr[cur - 1 :: -1] + arr[cur : len(a )]
cur -= 1
return arr
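# Worked trace of the two flips per pass (pancake_sort; the input is illustrative):
#   [3, 1, 2]: max is at index 0, first flip leaves [3, 1, 2];
#              flipping the first 3 elements gives [2, 1, 3]
#   [2, 1, 3]: max of the unsorted prefix is at index 0, first flip leaves [2, 1, 3];
#              flipping the first 2 elements gives [1, 2, 3]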
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__:List[Any] = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
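# Usage sketch for the ONNX config above (hedged: names follow the upstream
# BlenderbotSmallConfig / BlenderbotSmallOnnxConfig API that this obfuscated
# copy mirrors; TensorType is already imported at the top of this snippet):
#
#   config = BlenderbotSmallConfig()
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm", use_past=False)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)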
| 67 | 1 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=64 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = embedding_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = MegatronBertModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )
__a = model(lowerCamelCase , token_type_ids=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = MegatronBertForMaskedLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = MegatronBertForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = MegatronBertForNextSentencePrediction(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = MegatronBertForPreTraining(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , next_sentence_label=lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = MegatronBertForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.num_labels
__a = MegatronBertForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.num_labels
__a = MegatronBertForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.num_choices
__a = MegatronBertForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : int = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_snake_case : Any = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case : Dict = True
# test_resize_embeddings = False
_snake_case : str = False
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
__a = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class in get_values(lowerCamelCase ):
__a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase )
__a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
return inputs_dict
def a__ ( self ):
__a = MegatronBertModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCamelCase )
def _lowerCamelCase( a ):
return torch.tensor(
a , dtype=torch.long , device=a , )
SCREAMING_SNAKE_CASE__:Optional[int] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def a__ ( self ):
__a = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
__a = os.path.join(os.environ["MYDIR"] , lowerCamelCase )
__a = MegatronBertModel.from_pretrained(lowerCamelCase )
model.to(lowerCamelCase )
model.half()
__a = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , lowerCamelCase )
__a = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
__a = output[0, ii, jj]
__a = expected[3 * ii + jj]
__a = "ii={} jj={} a={} b={}".format(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertTrue(math.isclose(lowerCamelCase , lowerCamelCase , rel_tol=lowerCamelCase , abs_tol=lowerCamelCase ) , msg=lowerCamelCase )
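# A miniature version of the element-wise tolerance check above (the numbers
# here are made up for illustration; the 1e-4 constant defined earlier in this
# file plays the role of both rel_tol and abs_tol):
import math

assert math.isclose(0.60404, 0.6040, rel_tol=1e-4, abs_tol=1e-4)
assert not math.isclose(0.6100, 0.6040, rel_tol=1e-4, abs_tol=1e-4)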
| 67 | """simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the new token to next input_ids
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
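# The two test files above follow the same tester pattern: build a config and
# inputs, instantiate the model, switch to eval mode, run a forward pass, and
# assert on output shapes. A minimal, de-obfuscated sketch of that pattern
# (ToyClassifier and check_shapes are hypothetical names, not part of the
# original suite):
import torch
import torch.nn as nn


class ToyClassifier(nn.Module):
    def __init__(self, vocab_size: int, hidden_size: int, num_labels: int):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden_size)
        self.head = nn.Linear(hidden_size, num_labels)

    def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
        # Mean-pool the token embeddings, then project to label logits.
        return self.head(self.embed(input_ids).mean(dim=1))


def check_shapes() -> None:
    batch_size, seq_length, vocab_size, num_labels = 2, 7, 99, 3
    model = ToyClassifier(vocab_size, hidden_size=32, num_labels=num_labels)
    model.eval()
    input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))
    with torch.no_grad():
        logits = model(input_ids)
    assert logits.shape == (batch_size, num_labels)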
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
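# A usage sketch of the divide-and-conquer maximum above with readable names
# (find_max_recursive is an illustrative rename, not from the original file):
def find_max_recursive(nums: list[int], left: int, right: int) -> int:
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # split the range in half
    left_max = find_max_recursive(nums, left, mid)
    right_max = find_max_recursive(nums, mid + 1, right)
    return left_max if left_max >= right_max else right_max


assert find_max_recursive([3, 1, 4, 1, 5, 9, 2, 6], 0, 7) == 9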
| 67 | """simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a ):
# Initialise PyTorch model
__a = MobileBertConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
__a = MobileBertForPreTraining(a )
# Load weights from tf checkpoint
__a = load_tf_weights_in_mobilebert(a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
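# Hypothetical command-line invocation of the conversion script above (the
# script filename and all paths are placeholders, not real checkpoints):
#
#   python convert_mobilebert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf_ckpt \
#       --mobilebert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin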
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class snake_case__ :
def __init__( self , lowerCamelCase ):
__a = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(lowerCamelCase )
self.set_fail_transitions()
def a__ ( self , lowerCamelCase , lowerCamelCase ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def a__ ( self , lowerCamelCase ):
__a = 0
for character in keyword:
__a = self.find_next_state(lowerCamelCase , lowerCamelCase )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
__a = len(self.adlist ) - 1
else:
__a = next_state
self.adlist[current_state]["output"].append(lowerCamelCase )
def a__ ( self ):
__a = deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCamelCase )
__a = 0
while q:
__a = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCamelCase )
__a = self.adlist[r]["fail_state"]
while (
self.find_next_state(lowerCamelCase , self.adlist[child]["value"] ) is None
and state != 0
):
__a = self.adlist[state]["fail_state"]
__a = self.find_next_state(
lowerCamelCase , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
__a = 0
__a = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def a__ ( self , lowerCamelCase ):
__a = {} # returns a dict with keywords and list of its occurrences
__a = 0
for i in range(len(lowerCamelCase ) ):
while (
self.find_next_state(lowerCamelCase , string[i] ) is None
and current_state != 0
):
__a = self.adlist[current_state]["fail_state"]
__a = self.find_next_state(lowerCamelCase , string[i] )
if next_state is None:
__a = 0
else:
__a = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__a = []
result[key].append(i - len(lowerCamelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
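# Usage sketch for the Aho-Corasick automaton above. The obfuscation renamed
# every method definition to `a__`, so the class cannot run as written; the
# call sites still preserve the real names `find_next_state`, `add_keyword`,
# and `set_fail_transitions`, while the search method's original name
# (`search_in` here) is assumed from the upstream implementation:
#
#   automaton = Automaton(["he", "she", "his", "hers"])
#   automaton.search_in("ushers")
#   # -> {"she": [1], "he": [2], "hers": [2]}  (keyword -> start indices)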
| 67 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class snake_case__ ( snake_case_ ):
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__a = input_file.read()
__a = regexp.search(lowerCamelCase )
return match
def a__ ( self , lowerCamelCase ):
with open(lowerCamelCase , encoding="utf-8" ) as input_file:
__a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__a = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__a = regexp.finditer(lowerCamelCase )
__a = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def a__ ( self ):
__a = Path("./datasets" )
__a = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 67 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
def _lowerCamelCase( a , a , a , a=None , a=None ):
# Recurse if needed
if "." in tensor_name:
__a = tensor_name.split("." )
for split in splits[:-1]:
__a = getattr(a , a )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
__a = new_module
__a = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
__a = tensor_name in module._buffers
__a = getattr(a , a )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
__a = False
__a = False
if is_buffer or not is_bitsandbytes_available():
__a = False
__a = False
else:
__a = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
__a = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
__a = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__a = old_value.to(a )
elif isinstance(a , torch.Tensor ):
__a = value.to("cpu" )
if value.dtype == torch.inta:
__a = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
__a = torch.tensor(a , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , a ) and fpaa_statistics is None:
__a = new_value.T
__a = old_value.__dict__
if is_abit:
__a = bnb.nn.IntaParams(a , requires_grad=a , **a ).to(a )
elif is_abit:
__a = bnb.nn.Paramsabit(a , requires_grad=a , **a ).to(a )
__a = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(a ) )
else:
if value is None:
__a = old_value.to(a )
elif isinstance(a , torch.Tensor ):
__a = value.to(a )
else:
__a = torch.tensor(a , device=a )
if is_buffer:
__a = new_value
else:
__a = nn.Parameter(a , requires_grad=old_value.requires_grad )
__a = new_value
def _lowerCamelCase( a , a=None , a=None , a=None , a=False ):
for name, module in model.named_children():
if current_key_name is None:
__a = []
current_key_name.append(a )
if (isinstance(a , nn.Linear ) or isinstance(a , a )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(a ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(a , a ):
__a , __a = module.weight.shape
else:
__a = module.in_features
__a = module.out_features
if quantization_config.quantization_method() == "llm_int8":
__a = bnb.nn.LinearabitLt(
a , a , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
__a = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
__a = bnb.nn.Linearabit(
a , a , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
__a = True
# Store the module class in case we need to transpose the weight later
__a = type(a )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(a )
if len(list(module.children() ) ) > 0:
__a , __a = _replace_with_bnb_linear(
a , a , a , a , has_been_replaced=a , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowerCamelCase( a , a=None , a=None , a=None ):
__a = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
__a , __a = _replace_with_bnb_linear(
a , a , a , a )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def _lowerCamelCase( *a , **a ):
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , a , )
return replace_with_bnb_linear(*a , **a )
def _lowerCamelCase( *a , **a ):
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , a , )
return set_module_quantized_tensor_to_device(*a , **a )
def _lowerCamelCase( a ):
__a = deepcopy(a ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
__a = find_tied_parameters(a )
# For compatibility with Accelerate < 0.18
if isinstance(a , a ):
__a = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__a = sum(a , [] )
__a = len(a ) > 0
# Check if it is a base model
__a = not hasattr(a , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__a = list(model.named_children() )
__a = [list_modules[-1][0]]
# add last module together with tied weights
__a = set(a ) - set(a )
__a = list(set(a ) ) + list(a )
# remove ".weight" from the keys
__a = [".weight", ".bias"]
__a = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__a = name.replace(a , "" )
filtered_module_names.append(a )
return filtered_module_names
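# The recursion in `_replace_with_bnb_linear` above follows a common pattern:
# walk `named_children`, swap matching leaf modules on the parent, and recurse
# into containers. A minimal sketch of just that pattern with plain `torch.nn`
# modules (no bitsandbytes; the `factory` callable stands in for constructing
# the quantized replacement layer):
import torch.nn as nn


def replace_linear(model: nn.Module, factory) -> nn.Module:
    for name, module in model.named_children():
        if isinstance(module, nn.Linear):
            # Swap the leaf in place on its parent container.
            model._modules[name] = factory(module.in_features, module.out_features)
        elif len(list(module.children())) > 0:
            # Recurse into composite modules.
            replace_linear(module, factory)
    return model


toy = nn.Sequential(nn.Linear(4, 8), nn.Sequential(nn.Linear(8, 2)))
replace_linear(toy, lambda i, o: nn.Linear(i, o, bias=False))
assert all(m.bias is None for m in toy.modules() if isinstance(m, nn.Linear))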
| 67 | """simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 67 | 1 |
"""simple docstring"""
from random import randint, random
def _lowerCamelCase( a , a , a , a = False , a = False , a = 5 , ):
__a = [[-1] * number_of_cells] # Create a highway without any car
__a = 0
__a = max(a , 0 )
while i < number_of_cells:
__a = (
randint(0 , a ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _lowerCamelCase( a , a ):
__a = 0
__a = highway_now[car_index + 1 :]
for cell in range(len(a ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(a , -1 )
def _lowerCamelCase( a , a , a ):
__a = len(a )
# Before calculations, the highway is empty
__a = [-1] * number_of_cells
for car_index in range(a ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__a = min(highway_now[car_index] + 1 , a )
# Number of empty cell before the next car
__a = get_distance(a , a ) - 1
# We can't have the car causing an accident
__a = min(next_highway[car_index] , a )
if random() < probability:
# Randomly, a driver will slow down
__a = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _lowerCamelCase( a , a , a , a ):
__a = len(highway[0] )
for i in range(a ):
__a = update(highway[i] , a , a )
__a = [-1] * number_of_cells
for car_index in range(a ):
__a = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__a = (car_index + speed) % number_of_cells
# Commit the change of position
__a = speed
highway.append(a )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
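# Note that the obfuscation renamed every function above to `_lowerCamelCase`,
# so later definitions shadow earlier ones and the module cannot run as
# written. The call sites preserve the real names `get_distance` and `update`;
# the construction and simulation entry points (`construct_highway`,
# `simulate`) are assumed from the upstream Nagel-Schreckenberg
# implementation, used roughly like:
#
#   highway = construct_highway(number_of_cells=21, frequency=4, initial_speed=0)
#   history = simulate(highway, number_of_update=20, probability=0.1, max_speed=5)
#   # history[t] is the highway at step t; -1 marks an empty cell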
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class snake_case__ :
def __init__( self ):
__a = []
__a = set()
def a__ ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def a__ ( self ):
return len(self.elements ) == 0
def a__ ( self , lowerCamelCase , lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase )
else:
# update
# print("update", item)
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def a__ ( self , lowerCamelCase ):
if item in self.set:
self.set.remove(lowerCamelCase )
__a = []
((__a) , (__a)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__a) , (__a)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def a__ ( self ):
return self.elements[0][1]
def a__ ( self ):
((__a) , (__a)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase )
return (priority, item)
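# Usage sketch for the priority queue above. Its method definitions were
# mangled to `a__`, but the call sites later in this file preserve the real
# names `minkey`, `empty`, `put`, `remove_element`, `top_show`, and `top_key`.
# `put(item, priority)` inserts, or, if the item is already queued, drains and
# rebuilds the heap so the item carries the new priority:
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5)
#   pq.put((1, 1), 3)
#   pq.put((0, 0), 1)  # update: (0, 0) now outranks (1, 1)
#   pq.minkey()        # -> 1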
def _lowerCamelCase( a , a ):
# euclidean distance
__a = np.array(a )
__a = np.array(a )
return np.linalg.norm(a - b )
def _lowerCamelCase( a , a ):
# integer division by time variable
return consistent_heuristic(a , a ) // t
def _lowerCamelCase( a , a ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCamelCase( a , a , a , a ):
__a = g_function[start] + Wa * heuristics[i](a , a )
return ans
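# The priority computed above is the standard weighted-A* key,
# f_i(s) = g(s) + W1 * h_i(s, goal), evaluated once per heuristic i. A tiny
# self-contained check of that formula (the helper name is illustrative):
def weighted_key(g_cost: float, heuristic_value: float, w1: float = 1.0) -> float:
    return g_cost + w1 * heuristic_value


assert weighted_key(g_cost=3.0, heuristic_value=4.0, w1=2.0) == 11.0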
def _lowerCamelCase( a , a , a ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def _lowerCamelCase( a ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase( a , a , a , a , a , a , a , a , ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def _lowerCamelCase( ):
__a = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
SCREAMING_SNAKE_CASE__:Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
SCREAMING_SNAKE_CASE__:str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
SCREAMING_SNAKE_CASE__:int = make_common_ground()
SCREAMING_SNAKE_CASE__:List[str] = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE__:str = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 1
SCREAMING_SNAKE_CASE__:Union[str, Any] = 20
SCREAMING_SNAKE_CASE__:Dict = 3 # one consistent and two other inconsistent
# start and end destination
SCREAMING_SNAKE_CASE__:Dict = (0, 0)
SCREAMING_SNAKE_CASE__:Optional[Any] = (n - 1, n - 1)
SCREAMING_SNAKE_CASE__:List[str] = 1
def _lowerCamelCase( a , a , a ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 1 |