code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase__ ( snake_case_ ):
'''simple docstring'''
def UpperCAmelCase ( self ):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self._create_example_records()
UpperCamelCase = Dataset.from_list(lowerCamelCase__ )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(lowerCamelCase__ ):
self.assertDictEqual(lowerCamelCase__ , example_records[i] )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self._create_example_records()
UpperCamelCase = Dataset.from_list(lowerCamelCase__ )
UpperCamelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self ): # checks what happens with missing columns
'''simple docstring'''
UpperCamelCase = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
UpperCamelCase = Dataset.from_list(lowerCamelCase__ )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def UpperCAmelCase ( self ): # checks if the type can be inferred from the second record
'''simple docstring'''
UpperCamelCase = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
UpperCamelCase = Dataset.from_list(lowerCamelCase__ )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = Dataset.from_list([] )
self.assertEqual(len(lowerCamelCase__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 212 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ):
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = True
UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase__ ( snake_case_, unittest.TestCase ):
'''simple docstring'''
_snake_case = True
_snake_case = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = FlaxRobertaModelTester(self )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=lowerCamelCase__ )
UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 212 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase ( __UpperCAmelCase ):
lowerCamelCase_ =["""vqvae"""]
def __init__( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , ) -> List[Any]:
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , mel=UpperCAmelCase_ , vqvae=UpperCAmelCase_)
def __UpperCAmelCase ( self : str) -> List[Any]:
return 50 if isinstance(self.scheduler , UpperCAmelCase_) else 1000
@torch.no_grad()
def __call__( self : Any , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Tuple = None , __lowerCAmelCase : Tuple = None , __lowerCAmelCase : List[str] = 0 , __lowerCAmelCase : Tuple = 0 , __lowerCAmelCase : int = None , __lowerCAmelCase : Union[str, Any] = None , __lowerCAmelCase : List[Any] = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : str = None , __lowerCAmelCase : Union[str, Any] = 0 , __lowerCAmelCase : Tuple = None , __lowerCAmelCase : List[Any] = None , __lowerCAmelCase : Optional[int]=True , ) -> List[Any]:
lowercase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
lowercase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
lowercase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowercase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
lowercase_ = noise
lowercase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_)
lowercase_ = self.mel.audio_slice_to_image(UpperCAmelCase_)
lowercase_ = np.frombuffer(input_image.tobytes() , dtype="uint8").reshape(
(input_image.height, input_image.width))
lowercase_ = (input_image / 255) * 2 - 1
lowercase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
lowercase_ = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0)).latent_dist.sample(
generator=UpperCAmelCase_)[0]
lowercase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowercase_ = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1])
lowercase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowercase_ = int(mask_start_secs * pixels_per_second)
lowercase_ = int(mask_end_secs * pixels_per_second)
lowercase_ = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , UpperCAmelCase_):
lowercase_ = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)["sample"]
else:
lowercase_ = self.unet(UpperCAmelCase_ , UpperCAmelCase_)["sample"]
if isinstance(self.scheduler , UpperCAmelCase_):
lowercase_ = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )["prev_sample"]
else:
lowercase_ = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )["prev_sample"]
if mask is not None:
if mask_start > 0:
lowercase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowercase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowercase_ = 1 / self.vqvae.config.scaling_factor * images
lowercase_ = self.vqvae.decode(UpperCAmelCase_)["sample"]
lowercase_ = (images / 2 + 0.5).clamp(0 , 1)
lowercase_ = images.cpu().permute(0 , 2 , 3 , 1).numpy()
lowercase_ = (images * 255).round().astype("uint8")
lowercase_ = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase_ , mode="RGB").convert("L") for _ in images))
lowercase_ = [self.mel.image_to_audio(UpperCAmelCase_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase_))
@torch.no_grad()
def __UpperCAmelCase ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any = 50) -> Optional[Any]:
assert isinstance(self.scheduler , UpperCAmelCase_)
self.scheduler.set_timesteps(UpperCAmelCase_)
lowercase_ = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8").reshape((1, image.height, image.width)) for image in images])
lowercase_ = (sample / 255) * 2 - 1
lowercase_ = torch.Tensor(UpperCAmelCase_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
lowercase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowercase_ = self.scheduler.alphas_cumprod[t]
lowercase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowercase_ = 1 - alpha_prod_t
lowercase_ = self.unet(UpperCAmelCase_ , UpperCAmelCase_)["sample"]
lowercase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowercase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowercase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __UpperCAmelCase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]) -> List[str]:
lowercase_ = acos(torch.dot(torch.flatten(UpperCAmelCase_) , torch.flatten(UpperCAmelCase_)) / torch.norm(UpperCAmelCase_) / torch.norm(UpperCAmelCase_))
return sin((1 - alpha) * theta) * xa / sin(UpperCAmelCase_) + sin(alpha * theta) * xa / sin(UpperCAmelCase_)
| 711 | '''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCAmelCase_ : Optional[List[str]] = None
lowerCAmelCase_ : str = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCAmelCase_ : Any = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class lowercase :
lowerCamelCase_ =True
lowerCamelCase_ =None
# Automatically constructed
lowerCamelCase_ ="PIL.Image.Image"
lowerCamelCase_ =pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCamelCase_ =field(default='Image' , init=__lowerCamelCase , repr=__lowerCamelCase )
def __call__( self : List[Any]) -> List[Any]:
return self.pa_type
def __UpperCAmelCase ( self : Any , __lowerCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
lowercase_ = np.array(__lowerCAmelCase)
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
return {"path": value, "bytes": None}
elif isinstance(__lowerCAmelCase , __lowerCAmelCase):
return {"path": None, "bytes": value}
elif isinstance(__lowerCAmelCase , np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__lowerCAmelCase)
elif isinstance(__lowerCAmelCase , PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__lowerCAmelCase)
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCAmelCase : dict , __lowerCAmelCase : Dict=None) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'.")
if token_per_repo_id is None:
lowercase_ = {}
lowercase_ , lowercase_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.')
else:
if is_local_path(__lowerCAmelCase):
lowercase_ = PIL.Image.open(__lowerCAmelCase)
else:
lowercase_ = path.split("::")[-1]
try:
lowercase_ = string_to_dict(__lowerCAmelCase , config.HUB_DATASETS_URL)["repo_id"]
lowercase_ = token_per_repo_id.get(__lowerCAmelCase)
except ValueError:
lowercase_ = None
with xopen(__lowerCAmelCase , "rb" , use_auth_token=__lowerCAmelCase) as f:
lowercase_ = BytesIO(f.read())
lowercase_ = PIL.Image.open(bytes_)
else:
lowercase_ = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def __UpperCAmelCase ( self : Tuple) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
def __UpperCAmelCase ( self : str , __lowerCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
lowercase_ = pa.array([None] * len(__lowerCAmelCase) , type=pa.binary())
lowercase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
lowercase_ = pa.array([None] * len(__lowerCAmelCase) , type=pa.string())
lowercase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
lowercase_ = storage.field("bytes")
else:
lowercase_ = pa.array([None] * len(__lowerCAmelCase) , type=pa.binary())
if storage.type.get_field_index("path") >= 0:
lowercase_ = storage.field("path")
else:
lowercase_ = pa.array([None] * len(__lowerCAmelCase) , type=pa.string())
lowercase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null())
elif pa.types.is_list(storage.type):
lowercase_ = pa.array(
[encode_np_array(np.array(__lowerCAmelCase))["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowercase_ = pa.array([None] * len(__lowerCAmelCase) , type=pa.string())
lowercase_ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null())
return array_cast(__lowerCAmelCase , self.pa_type)
def __UpperCAmelCase ( self : List[Any] , __lowerCAmelCase : pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(__lowerCAmelCase : int):
with xopen(__lowerCAmelCase , "rb") as f:
lowercase_ = f.read()
return bytes_
lowercase_ = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase_ = pa.array(
[os.path.basename(__lowerCAmelCase) if path is not None else None for path in storage.field("path").to_pylist()] , type=pa.string() , )
lowercase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null())
return array_cast(__lowerCAmelCase , self.pa_type)
def __a ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowercase_ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __a ( __lowerCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
lowercase_ = BytesIO()
if image.format in list_image_compression_formats():
lowercase_ = image.format
else:
lowercase_ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(__lowerCamelCase , format=__lowerCamelCase )
return buffer.getvalue()
def __a ( __lowerCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(__lowerCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )}
def __a ( __lowerCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
lowercase_ = array.dtype
lowercase_ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
lowercase_ = dtype.kind
lowercase_ = dtype.itemsize
lowercase_ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowercase_ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowercase_ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowercase_ = dtype_byteorder + dtype_kind + str(__lowerCamelCase )
lowercase_ = np.dtype(__lowerCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
lowercase_ = PIL.Image.fromarray(array.astype(__lowerCamelCase ) )
return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )}
def __a ( __lowerCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
lowercase_ , lowercase_ = first_non_null_value(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__lowerCamelCase , np.ndarray ):
lowercase_ = no_op_if_value_is_null(__lowerCamelCase )
return [obj_to_image_dict_func(__lowerCamelCase ) for obj in objs]
elif isinstance(__lowerCamelCase , PIL.Image.Image ):
lowercase_ = no_op_if_value_is_null(__lowerCamelCase )
return [obj_to_image_dict_func(__lowerCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 461 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : Tuple = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Any = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Union[str, Any] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 241 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class _snake_case ( _A ):
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,UpperCamelCase=None ) -> Any:
snake_case__ :Any = self.layer[current_layer](UpperCamelCase ,UpperCamelCase ,head_mask[current_layer] )
snake_case__ :int = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , _A , )
class _snake_case ( _A ):
def __init__( self ,UpperCamelCase ) -> Any:
super().__init__(UpperCamelCase )
snake_case__ :Tuple = BertEncoderWithPabee(UpperCamelCase )
self.init_weights()
snake_case__ :Tuple = 0
snake_case__ :Union[str, Any] = 0
snake_case__ :Tuple = 0
snake_case__ :Union[str, Any] = 0
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Dict:
snake_case__ :Tuple = threshold
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Any:
snake_case__ :Union[str, Any] = patience
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Optional[int] = 0
snake_case__ :List[Any] = 0
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Optional[Any] = self.inference_layers_num / self.inference_instances_num
snake_case__ :Optional[Any] = (
f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(UpperCamelCase )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=False ,) -> str:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
snake_case__ :List[str] = input_ids.size()
elif inputs_embeds is not None:
snake_case__ :List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
snake_case__ :List[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case__ :Union[str, Any] = torch.ones(UpperCamelCase ,device=UpperCamelCase )
if token_type_ids is None:
snake_case__ :Any = torch.zeros(UpperCamelCase ,dtype=torch.long ,device=UpperCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case__ :torch.Tensor = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
snake_case__ , snake_case__ , snake_case__ :Union[str, Any] = encoder_hidden_states.size()
snake_case__ :int = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
snake_case__ :Optional[int] = torch.ones(UpperCamelCase ,device=UpperCamelCase )
snake_case__ :Optional[int] = self.invert_attention_mask(UpperCamelCase )
else:
snake_case__ :Dict = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case__ :Tuple = self.get_head_mask(UpperCamelCase ,self.config.num_hidden_layers )
snake_case__ :List[str] = self.embeddings(
input_ids=UpperCamelCase ,position_ids=UpperCamelCase ,token_type_ids=UpperCamelCase ,inputs_embeds=UpperCamelCase )
snake_case__ :Union[str, Any] = embedding_output
if self.training:
snake_case__ :Optional[int] = []
for i in range(self.config.num_hidden_layers ):
snake_case__ :List[Any] = self.encoder.adaptive_forward(
UpperCamelCase ,current_layer=UpperCamelCase ,attention_mask=UpperCamelCase ,head_mask=UpperCamelCase )
snake_case__ :int = self.pooler(UpperCamelCase )
snake_case__ :Tuple = output_layers[i](output_dropout(UpperCamelCase ) )
res.append(UpperCamelCase )
elif self.patience == 0: # Use all layers for inference
snake_case__ :Any = self.encoder(
UpperCamelCase ,attention_mask=UpperCamelCase ,head_mask=UpperCamelCase ,encoder_hidden_states=UpperCamelCase ,encoder_attention_mask=UpperCamelCase ,)
snake_case__ :Optional[int] = self.pooler(encoder_outputs[0] )
snake_case__ :Optional[int] = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase )]
else:
snake_case__ :Optional[int] = 0
snake_case__ :Dict = None
snake_case__ :str = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
snake_case__ :Any = self.encoder.adaptive_forward(
UpperCamelCase ,current_layer=UpperCamelCase ,attention_mask=UpperCamelCase ,head_mask=UpperCamelCase )
snake_case__ :Union[str, Any] = self.pooler(UpperCamelCase )
snake_case__ :Optional[Any] = output_layers[i](UpperCamelCase )
if regression:
snake_case__ :Optional[Any] = logits.detach()
if patient_result is not None:
snake_case__ :Union[str, Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
snake_case__ :Optional[Any] = 0
else:
snake_case__ :Tuple = logits.detach().argmax(dim=1 )
if patient_result is not None:
snake_case__ :Optional[int] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase ) ):
patient_counter += 1
else:
snake_case__ :Any = 0
snake_case__ :int = logits
if patient_counter == self.patience:
break
snake_case__ :int = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , _A , )
class _snake_case ( _A ):
    """BERT with PABEE (patience-based early exit) and one classifier per layer.

    Restored from mangled code: the forward signature previously repeated one
    parameter name seven times (a SyntaxError) and local names (``outputs``,
    ``total_loss``, ``loss_fct``) were lost; the method is named ``forward`` so
    ``nn.Module.__call__`` dispatches to it.
    """

    def __init__(self, config) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classification head per transformer layer, used by the early-exit logic.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    # NOTE(review): the decorator argument was mangled; upstream PABEE uses
    # BERT_INPUTS_DOCSTRING here — confirm it is imported earlier in this file.
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Run PABEE BERT.

        Returns ``(logits_of_last_exit,)``; when ``labels`` is given, prepends the
        depth-weighted average loss over all per-layer exits.
        """
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # Regression task.
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # Deeper exits get proportionally larger weight (ix + 1).
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase_ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register Transformers' shared CLI options.

    The hook only fires under this exact name (the mangled name was never
    called by pytest, and was shadowed by the next definition anyway).
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extended test reports when ``--make-reports`` is set.

    The body already referenced ``terminalreporter``, so the mangled parameter
    name raised a NameError; hook and parameter names are restored, and the
    report id is the option value rather than the reporter object.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase_ = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """Generate a 1024-bit RSA key pair and write it to ``rsa_*key.txt`` files.

    Name restored so the ``main()`` call under the ``__main__`` guard resolves.
    """
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    """Return ``((n, e), (n, d))`` for a fresh RSA key of ``key_size`` bits.

    Name restored so the call in ``make_key_files`` resolves; the body already
    referenced the (previously undefined) original argument name.
    """
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        # Draw candidates until one is coprime with phi(n).
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if either exists.

    The original definition repeated one parameter name twice (a SyntaxError)
    while the body used the real names; both are restored.
    """
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
| 78 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.  (The original bound both the logger and the archive map
# to one shared name, so the map assignment clobbered the logger.)
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class IBertConfig(PretrainedConfig):
    """Configuration for I-BERT (integer-only quantized RoBERTa).

    Restored from mangled code: the base class, the ``model_type`` class
    attribute, and the constructor parameter names (previously one duplicated
    name, a SyntaxError) now match the attributes assigned below.
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Whether to run in (simulated) integer-quantized mode.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for I-BERT.

    The property is named ``inputs`` so it actually overrides
    ``OnnxConfig.inputs`` (the mangled name was never consulted by the
    exporter); the class name no longer shadows the config class above.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 185 | 0 |
"""Lazy import structure for the DPT model."""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

# Submodule contents, extended conditionally below.  (The original reassigned a
# single name for every update, losing the dict and leaving `_import_structure`
# undefined at the `_LazyModule` call.)
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
class RadixNode:
    """A node of a radix (compressed prefix) tree over strings.

    Restored from mangled code: the class name matches the ``RadixNode()``
    call sites below, the constructor no longer duplicates a parameter name,
    and the ``self.``/subscript assignment targets that had been collapsed to
    throwaway locals (breaking insert/delete entirely) are reinstated.
    """

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of a child's prefix to that child.
        self.nodes: dict[str, RadixNode] = {}
        # A node is a leaf iff the tree contains its accumulated word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Split against ``word``: (common prefix, leftover of self.prefix, leftover of word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word in ``words``."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert ``word`` below this node."""
        # Case 1: the word equals this node's prefix -> mark this node a leaf.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: no edge shares the word's first character -> new leaf child.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: the child's prefix is fully matched -> recurse with the rest.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: partial match -> split the edge with an intermediate node.
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True iff ``word`` was inserted into the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # Leftover edge prefix means the word is not in the tree.
        if remaining_prefix != "":
            return False
        # Word fully consumed: it exists iff the node is a leaf.
        if remaining_word == "":
            return incoming_node.is_leaf
        return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove ``word``; return True if it was present, merging unary chains."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        if remaining_prefix != "":
            return False
        if remaining_word != "":
            return incoming_node.delete(remaining_word)
        if not incoming_node.is_leaf:
            return False
        if len(incoming_node.nodes) == 0:
            del self.nodes[word[0]]
            # Merge this node with its only child when possible.
            if len(self.nodes) == 1 and not self.is_leaf:
                merging_node = list(self.nodes.values())[0]
                self.is_leaf = merging_node.is_leaf
                self.prefix += merging_node.prefix
                self.nodes = merging_node.nodes
        elif len(incoming_node.nodes) > 1:
            # More than one child: just unmark the leaf flag.
            incoming_node.is_leaf = False
        else:
            # Exactly one child: merge it into the deleted node.
            merging_node = list(incoming_node.nodes.values())[0]
            incoming_node.is_leaf = merging_node.is_leaf
            incoming_node.prefix += merging_node.prefix
            incoming_node.nodes = merging_node.nodes
        return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree, one dash per depth level."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    """Exercise insert/find/delete on a small word list; returns True on success.

    Name restored to match the ``assert test_trie()`` call below (three
    functions previously shared one mangled name and shadowed each other).
    """
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def pytests() -> None:
    """pytest entry point: run the trie self-test."""
    assert test_trie()
def main() -> None:
    """Demo: build a radix tree from a word list and pretty-print it.

    Name restored to match the ``main()`` call under the ``__main__`` guard.
    """
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 189 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """Builds tiny FocalNet configs/inputs and shape-checks each model head.

    Restored from mangled code: the class name matches the
    ``FocalNetModelTester(self)`` call sites below, the constructor no longer
    repeats one parameter name 26 times (a SyntaxError), the ``self.`` targets
    lost in ``__init__`` are reinstated, and method names match the calls made
    from the test class (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels only when use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): `path_norm=` reproduces the keyword exactly as in the
        # original source — confirm whether FocalNetConfig expects `patch_norm`.
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # Sequence length/dim after all downsampling stages.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for the FocalNet model family.

    Restored from mangled code: the mixin bases, the distinct class-level
    attributes (all previously assigned to one shadowed name), and ``test_``
    method names that unittest/pytest can actually collect.
    """

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason='FocalNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='FocalNet does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        # The backbone (last class) has no embeddings API, hence [:-1].
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the public focalnet-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2_166, -0.4_368, 0.2_191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # Fixed: was assertTrue(value, 281), which treats 281 as the failure
        # message and never compares — assertEqual performs the real check.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """Backbone-specific tests driven by BackboneTesterMixin.

    Restored: the mixin base and the three distinct class attributes the mixin
    reads (all previously bound to one shadowed name).
    """

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 539 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
# Root logger; the stream handler below is attached to it (the original bound
# it to a mangled name while the addHandler call referenced `logger`).
logger = logging.getLogger()
def SCREAMING_SNAKE_CASE ( a_ : Path , a_ : list ):
__a = '\n'.join(a_ )
Path(a_ ).open('w' ).writelines(a_ )
UpperCAmelCase_ = "patrickvonplaten/t5-tiny-random"
UpperCAmelCase_ = "sshleifer/bart-tiny-random"
UpperCAmelCase_ = "sshleifer/tiny-mbart"
UpperCAmelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    """End-to-end tests for run_eval.py / run_eval_search.py.

    Restored from mangled code: the ``TestCasePlus`` base, distinct method
    names (every method previously shared one name, so only the last survived
    and none were ``test_``-prefixed for collection), and the ``model`` /
    ``ROUGE_KEYS`` references used by the bodies.
    """

    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()

        with patch.object(sys, 'argv', testargs):
            run_generate()
            # NOTE(review): the mangled original obscured which path is
            # asserted here; the output file is the artifact run_generate writes.
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()

        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / 'scores.json')
        reference_path = str(tmp_dir / 'val.target')
        _dump_articles(input_file_name, text['en'])
        _dump_articles(reference_path, text['de'])
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f"\n run_eval_search.py\n {model}\n {str(input_file_name)}\n {str(output_file_name)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])

        with patch.object(sys, 'argv', testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [' num_beams | length_penalty', model, 'Best score args']
            un_expected_strings = ['Info']
            if "translation" in task:
                expected_strings.append('bleu')
            else:
                # ROUGE metrics are imported from utils at the top of the file.
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 539 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny TrOCR decoder configs and inputs for the tests below.

    NOTE(review): the original __init__ had every parameter collapsed to one
    name (a SyntaxError) and the attribute assignments were bound to a single
    local instead of `self`.  Names are restored from the assignment pattern
    and the call site `TrOCRStandaloneDecoderModelTester(self, ...)` below.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for a tiny decoder."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )
        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that cached (past_key_values) decoding matches uncached decoding."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) common format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """unittest entry points for the standalone TrOCR decoder.

    NOTE(review): all members had been collapsed to shared placeholder names
    (methods shadowed each other, mixin bases were undefined); names restored.
    The class is also renamed so it no longer shadows the tester class above.
    """

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    def test_inputs_embeds(self):
        # not applicable to the standalone decoder
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("""The model doesn't support left padding""")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 713 |
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Return True if *color* may be used for a vertex with adjacency row *neighbours*.

    A color is valid when no already-colored neighbour (entry == 1) uses it.
    (Restored name: the body below calls `valid_coloring`, but an automated
    rename had collapsed all three defs in this section to one name.)
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    """Backtracking helper: color vertices from *index* onward, in place."""
    # Base case: every vertex has been colored successfully.
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def UpperCAmelCase(graph: list, max_colors: int) -> list:
    """Color *graph* (adjacency matrix) with at most *max_colors* colors.

    Returns one valid coloring as a list of color indices, or [] if impossible.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 334 | 0 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_SCREAMING_SNAKE_CASE : Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    """Drop top-level bookkeeping entries from an OpenAI whisper state dict, in place.

    Renamed to `remove_ignore_keys_` to match the call site in the conversion
    function below; the previous body popped the dict with itself as the key.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        # missing keys are fine — pop with a default
        state_dict.pop(k, None)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    """Rewrite OpenAI whisper state-dict key names to transformers names, in place.

    Applies every substring substitution from the module-level WHISPER_MAPPING
    table to each key, then moves the tensor under the rewritten key.
    Renamed to `rename_keys` to match the call site in the conversion function.
    Returns the same (mutated) dict for convenience.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        # re-home the tensor under its new name
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear whose weight is tied to *emb*'s weight.

    Used to tie the LM head to the decoder token embedding.  Renamed to
    `make_linear_from_emb` to match the call site in the conversion function;
    the previous body unpacked the shape into a single name and passed the
    embedding as the `bias` argument.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share the underlying storage so the head stays tied to the embedding
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    """Download *url* into *root*, verifying the SHA256 embedded in the URL path.

    Returns the file contents as bytes.  A cached copy with a matching
    checksum is reused; a mismatching copy is re-downloaded.
    NOTE(review): the visible call site passes only the URL, so *root* was
    given a default here — confirm the intended cache directory.
    The previous body had duplicate parameter names and called the
    non-existent `hashlib.shaaaa` (restored to `hashlib.sha256`).
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    # OpenAI encodes the expected SHA256 as the second-to-last URL path segment
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI whisper checkpoint into a transformers WhisperForConditionalGeneration.

    *checkpoint_path* is either a model alias (looked up in _MODELS and
    downloaded) or a local ``.pt`` file.  The converted model is written to
    *pytorch_dump_folder_path* via ``save_pretrained``.
    Renamed to match the ``__main__`` call site below; local names restored
    after an automated rename collapsed them.
    """
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # keep the token embedding around in case the head is not tied
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # NOTE(review): "n_text_state" looks odd for an attention-head count
        # but is preserved from the original mapping — confirm upstream.
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI wrapper: parse the checkpoint location and output folder, then convert.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 226 |
def __lowerCAmelCase ( __magic_name__ ):
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("only integers accepted as input" )
else:
_lowercase: Optional[Any] = str(abs(__magic_name__ ) )
_lowercase: Tuple = [list(__magic_name__ ) for char in range(len(__magic_name__ ) )]
for index in range(len(__magic_name__ ) ):
num_transpositions[index].pop(__magic_name__ )
return max(
int("".join(list(__magic_name__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
| 226 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class a__:
    """Dense real polynomial; ``coefficients[i]`` is the coefficient of ``x**i``.

    NOTE(review): an automated rename had collapsed locals, attribute
    assignments, and three method names (evaluate/derivative/integral all
    shared one name and shadowed each other); distinct names are restored.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store *degree* and a defensive copy of *coefficients* (len == degree + 1)."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''')
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: a__) -> a__:
        """Return the sum; the result has the larger of the two degrees."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return a__(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return a__(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: a__) -> a__:
        """Return self - polynomial_a (via addition of the negation)."""
        return self + polynomial_a * a__(0, [-1])

    def __neg__(self) -> a__:
        """Return the additive inverse."""
        return a__(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: a__) -> a__:
        """Return the product (Cauchy product of the coefficient lists)."""
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return a__(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at *substitution*."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Render highest power first; zero terms are skipped."""
        polynomial = ''''''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> a__:
        """Return the first derivative (degree drops by one)."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return a__(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> a__:
        """Return an antiderivative with integration constant *constant*."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return a__(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        """Equal iff same degree and identical coefficient lists."""
        if not isinstance(polynomial_a, a__):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
| 439 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class a__(UpperCamelCase_):
    """BertGeneration model configuration.

    NOTE(review): the original __init__ had every parameter collapsed to one
    name (a SyntaxError) and the attribute assignments were bound to a local
    instead of ``self``; names restored from the assignment order.
    """

    # model identifier used by the config registry (was collapsed by the rename)
    model_type = '''bert-generation'''

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 439 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : str = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class a__(__SCREAMING_SNAKE_CASE):
    """EfficientFormer model configuration.

    NOTE(review): the original __init__ had every parameter collapsed to one
    name (a SyntaxError) and the attribute assignments were bound to a local
    instead of ``self``; names restored from the assignment order.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 423 | from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class a__(__SCREAMING_SNAKE_CASE):
    """Task template mapping a dataset's columns onto the summarization schema.

    NOTE(review): the rename had collapsed every field to ``_A`` and set
    ``frozen=`` to the base-class placeholder; names restored per the
    visible schemas and the column-mapping property.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured column names onto the canonical schema names."""
        return {self.text_column: "text", self.summary_column: "summary"}
| 423 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case_(metaclass=lowerCamelCase_):
    """Placeholder object raising a helpful error when torch/torchsde are absent.

    NOTE(review): the rename produced ``*x, **x`` (duplicate parameter names,
    a SyntaxError) and collapsed the backend-list attribute the DummyObject
    metaclass reads; restored to the standard dummy-object shape.
    """

    # read by the DummyObject metaclass to report which backends are required
    _backends = ['torch', 'torchsde']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'torchsde'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])
| 708 |
from sklearn.metrics import fa_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_(datasets.Metric):
    """F1 metric backed by scikit-learn.

    NOTE(review): both methods had been collapsed to one name (shadowing each
    other); the datasets.Metric hook names ``_info``/``_compute`` are restored.
    """

    def _info(self):
        """Declare metric metadata and the expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }), reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'],)

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute F1; scalar results are unwrapped to a plain float."""
        # NOTE(review): `fa_score` is the module-level sklearn import above
        # (presumably f1_score before the automated rename) — confirm the import.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 657 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
# Lazy-module bootstrap for the SpeechT5 package.  NOTE(review): the rename
# had bound every structure to one placeholder name, so `_import_structure`
# was referenced but never defined and the module was never installed into
# sys.modules; the standard transformers lazy-init shape is restored.
_import_structure = {
    """configuration_speecht5""": [
        """SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
        """SpeechT5Config""",
        """SpeechT5HifiGanConfig""",
    ],
    """feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
    """processing_speecht5""": ["""SpeechT5Processor"""],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["""SpeechT5Tokenizer"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        """SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SpeechT5ForSpeechToText""",
        """SpeechT5ForSpeechToSpeech""",
        """SpeechT5ForTextToSpeech""",
        """SpeechT5Model""",
        """SpeechT5PreTrainedModel""",
        """SpeechT5HifiGan""",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy so submodules import on first use
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | def snake_case__ ( lowercase , lowercase ):
lowerCAmelCase_: list[list[str]] = [[] for _ in range(lowercase )]
lowerCAmelCase_: Optional[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(lowercase ) <= key:
return input_string
for position, character in enumerate(lowercase ):
lowerCAmelCase_: Optional[Any] = position % (lowest * 2) # puts it in bounds
lowerCAmelCase_: Any = min(lowercase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowercase )
lowerCAmelCase_: Optional[int] = ["".join(lowercase ) for row in temp_grid]
lowerCAmelCase_: int = "".join(lowercase )
return output_string
def snake_case__(input_string, key):
    """Decrypt a rail-fence (zigzag) ciphertext produced with height ``key``.

    Rebuilds the zigzag template to learn each rail's length, slices the
    ciphertext into rails, then reads the characters back along the zigzag.

    :param input_string: ciphertext to decrypt
    :param key: number of rails used for encryption; must be >= 1
    :raises ValueError: if ``key`` is 0 or negative
    :return: the decrypted string (the input itself when ``key`` is 1)
    """
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    lowest = key - 1  # index of the bottom rail

    def _row(position):
        # Rail index for a character at `position` along the zigzag.
        num = position % (lowest * 2)
        return min(num, lowest * 2 - num)

    # Generate the template: mark which rail each position belongs to.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    for position in range(len(input_string)):
        temp_grid[_row(position)].append("*")

    # Fill each rail with the corresponding slice of ciphertext.
    grid = []
    counter = 0
    for row in temp_grid:
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    # Read the plaintext back by walking the zigzag.
    output_string = ""
    for position in range(len(input_string)):
        output_string += grid[_row(position)].pop(0)
    return output_string
def snake_case__(lowercase):
    """Brute-force a rail-fence ciphertext by trying every key from 1 to len-1.

    :param lowercase: ciphertext to attack
    :return: dict mapping each candidate key to the resulting decryption
    """
    results = {}
    for key_guess in range(1, len(lowercase)):  # tries every key
        # NOTE(review): relies on a module-level `decrypt` (the rail-fence
        # decrypt defined above); in this copy the three functions share one
        # name — confirm the intended binding before shipping.
        results[key_guess] = decrypt(lowercase, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod() | 613 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( DiffusionPipeline ):
    """Karras-VE stochastic sampling pipeline.

    Wraps a denoising U-Net and a Karras variance-expanding scheduler and runs
    the 2nd-order (Heun-style) sampler from Karras et al., returning generated
    images.

    NOTE(review): restored from a degraded copy — base class, attribute
    annotations, and parameter names were collapsed; names below are grounded
    in this file's imports (DiffusionPipeline, UNetaDModel, KarrasVeScheduler)
    and in the body's own usage. Confirm against upstream before shipping.
    """

    # Denoising model and its companion Karras-VE noise scheduler.
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate ``batch_size`` images.

        :param batch_size: number of images to generate
        :param num_inference_steps: number of denoising steps
        :param generator: optional torch RNG for reproducibility
        :param output_type: "pil" for PIL images, anything else for numpy
        :param return_dict: wrap the result in ImagePipelineOutput when True
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['derivative'] , )
            sample = step_output.prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for image output.
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( ProcessorMixin ):
    """Processor combining a ViLT image processor and a BERT tokenizer.

    Text is tokenized and the resulting encoding is augmented with
    ``pixel_values``/``pixel_mask`` from the image processor.

    NOTE(review): restored from a degraded copy — the class attributes,
    parameter names and method names were collapsed; they are grounded in the
    body's keyword usage and in this file's imports (ProcessorMixin,
    BatchEncoding). Confirm against upstream before shipping.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        # Back-compat: accept the deprecated `feature_extractor` kwarg as an
        # alias for `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        """Tokenize `text` and add image features for `images` to the encoding."""
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )

        return encoding

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
'''simple docstring'''
def a_(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count n-digit positive integers that are nth powers.

    Checks every ``base`` in ``[1, max_base)`` raised to every ``power`` in
    ``[1, max_power)`` and counts the results whose decimal length equals the
    power. With the defaults this yields the known answer, 49.

    :param max_base: exclusive upper bound for the bases tried
    :param max_power: exclusive upper bound for the powers tried
    :return: number of (base, power) pairs with len(str(base**power)) == power
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"""{solution(1_0, 2_2) = }""")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ : Optional[Any] = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowerCAmelCase_():
    """Entry point for the `diffusers-cli` command-line tool.

    Builds the argument parser, registers subcommands, dispatches to the
    selected command's factory (`args.func`) and runs it. Prints help and
    exits with status 1 when no subcommand was given.
    """
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run: `func` is the command factory attached by register_subcommand.
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__A : Optional[Any] = logging.getLogger()
def lowerCAmelCase_(a: str) -> dict:
    """Load the trainer metrics from ``<a>/all_results.json``.

    :param a: output directory of a training run
    :raises ValueError: if the results file does not exist
    :return: the parsed JSON results as a dict
    """
    results = {}
    path = os.path.join(a, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f'''can\'t find {path}''')
    return results
__A : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _UpperCamelCase ( _A ):
    """End-to-end TPU tests that launch training scripts through torch-xla's
    `xla_spawn` launcher.

    NOTE(review): this copy references `_a` (used as both the `patch.object`
    target and replacement) and reads `result`, `end`, `start` that are never
    bound — presumably `sys`, the CLI arg list, and the timing/result locals
    in the original. Confirm against the upstream test before relying on it.
    """

    def lowercase__ ( self ):
        """Run `run_glue.py` on 8 TPU cores and check accuracy and wall time."""
        import xla_spawn

        a__ = self.get_auto_remove_tmp_dir()
        # CLI for xla_spawn: launcher flags first, then the training script
        # and its hyperparameters (tiny run: 10 steps, batch size 2).
        a__ = F'''
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()

        with patch.object(_a , 'argv' , _a ):
            a__ = time()
            xla_spawn.main()
            a__ = time()

            a__ = get_results(_a )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 500 )

    def lowercase__ ( self ):
        """Smoke-test the spawn machinery itself via tests/test_trainer_tpu.py."""
        import xla_spawn

        a__ = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(_a , 'argv' , _a ):
            xla_spawn.main()
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _a (unittest.TestCase ):
    """Tests for `FlaxAutoModel`: loading known checkpoints, jit-compiled
    forward passes, and the error messages raised for bad identifiers.

    NOTE(review): in this copy most locals and the loop variable references
    were collapsed into `__a` (name-mangled inside the class); the original
    presumably passed `model_name` / the loaded objects. Confirm upstream.
    """

    @slow
    def snake_case_ ( self ) -> str:
        # BERT checkpoints: config and model should load and have the right types.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(__a ):
                snake_case : Optional[Any] = AutoConfig.from_pretrained(__a )
                self.assertIsNotNone(__a )
                self.assertIsInstance(__a ,__a )

                snake_case : Tuple = FlaxAutoModel.from_pretrained(__a )
                self.assertIsNotNone(__a )
                self.assertIsInstance(__a ,__a )

    @slow
    def snake_case_ ( self ) -> Optional[Any]:
        # Same as above for RoBERTa checkpoints.
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(__a ):
                snake_case : Tuple = AutoConfig.from_pretrained(__a )
                self.assertIsNotNone(__a )
                self.assertIsInstance(__a ,__a )

                snake_case : Optional[int] = FlaxAutoModel.from_pretrained(__a )
                self.assertIsNotNone(__a )
                self.assertIsInstance(__a ,__a )

    @slow
    def snake_case_ ( self ) -> List[str]:
        # BERT forward pass must work under jax.jit.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            snake_case : Tuple = AutoTokenizer.from_pretrained(__a )
            snake_case : Tuple = FlaxBertModel.from_pretrained(__a )
            snake_case : Dict = tokenizer("""Do you support jax jitted function?""" ,return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**__a ):
                return model(**__a )

            eval(**__a ).block_until_ready()

    @slow
    def snake_case_ ( self ) -> Union[str, Any]:
        # RoBERTa forward pass must work under jax.jit.
        for model_name in ["roberta-base", "roberta-large"]:
            snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(__a )
            snake_case : str = FlaxRobertaModel.from_pretrained(__a )
            snake_case : Tuple = tokenizer("""Do you support jax jitted function?""" ,return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**__a ):
                return model(**__a )

            eval(**__a ).block_until_ready()

    def snake_case_ ( self ) -> Dict:
        # Invalid model identifier should raise with a helpful message.
        with self.assertRaisesRegex(
            __a ,"""bert-base is not a local folder and is not a valid model identifier""" ):
            snake_case : Optional[int] = FlaxAutoModel.from_pretrained("""bert-base""" )

    def snake_case_ ( self ) -> Optional[Any]:
        # Invalid revision should raise.
        with self.assertRaisesRegex(
            __a ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            snake_case : Union[str, Any] = FlaxAutoModel.from_pretrained(__a ,revision="""aaaaaa""" )

    def snake_case_ ( self ) -> Union[str, Any]:
        # Repo without flax weights should raise.
        with self.assertRaisesRegex(
            __a ,"""hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" ,):
            snake_case : int = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )

    def snake_case_ ( self ) -> Optional[Any]:
        # PyTorch-only repo should raise and suggest `from_pt=True`.
        with self.assertRaisesRegex(__a ,"""Use `from_pt=True` to load this model""" ):
            snake_case : Any = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Optional[int] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowercase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase ):
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCAmelCase )
if number < 1:
lowercase__ : List[Any] = F"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCAmelCase )
lowercase__ : Optional[Any] = 1
for i in range(1 , UpperCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import string
import numpy
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
return b if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase )
class UpperCAmelCase :
    """Hill cipher over the 36-character alphabet A-Z0-9 using a numpy key matrix.

    NOTE(review): in this degraded copy distinct variables were collapsed into
    one placeholder name per statement (`lowercase__`, `__lowerCAmelCase`,
    `SCREAMING_SNAKE_CASE`), so many reads below (`encrypt_key`, `det`,
    `req_l`, `chars`, `text`, `batch`, `vec`, `encrypted`, `decrypted`, ...)
    are unbound, and the three class attributes shadow each other. The
    intended attributes are presumably the alphabet string, a mod-36
    vectorizer, and an int-cast vectorizer — confirm against the upstream
    source before relying on any behavior documented here.
    """

    SCREAMING_SNAKE_CASE = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    SCREAMING_SNAKE_CASE = numpy.vectorize(lambda a__ : x % 3_6 )

    SCREAMING_SNAKE_CASE = numpy.vectorize(a__ )

    def __init__( self , __lowerCAmelCase ) -> None:
        # Reduce the key matrix mod 36 and validate its determinant; records
        # the block size from the key's dimension.
        lowercase__ : Optional[int] = self.modulus(__lowerCAmelCase )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        lowercase__ : int = encrypt_key.shape[0]

    def _lowerCAmelCase( self , __lowerCAmelCase ) -> int:
        # replace_letters: map a character to its index in the alphabet.
        return self.key_string.index(__lowerCAmelCase )

    def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
        # replace_digits: map a numeric value back to its alphabet character.
        return self.key_string[round(__lowerCAmelCase )]

    def _lowerCAmelCase( self ) -> None:
        # check_determinant: the key determinant must be coprime with 36 for
        # the cipher to be invertible.
        lowercase__ : Tuple = round(numpy.linalg.det(self.encrypt_key ) )

        if det < 0:
            lowercase__ : int = det % len(self.key_string )

        lowercase__ : str = len(self.key_string )
        if greatest_common_divisor(__lowerCAmelCase , len(self.key_string ) ) != 1:
            lowercase__ : str = (
                f"""determinant modular {req_l} of encryption key({det}) """
                f"""is not co prime w.r.t {req_l}.\nTry another key."""
            )
            raise ValueError(__lowerCAmelCase )

    def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
        # process_text: keep only alphabet characters (uppercased) and pad to
        # a multiple of the block size with the last character.
        lowercase__ : Optional[int] = [char for char in text.upper() if char in self.key_string]

        lowercase__ : Union[str, Any] = chars[-1]
        while len(__lowerCAmelCase ) % self.break_key != 0:
            chars.append(__lowerCAmelCase )

        return "".join(__lowerCAmelCase )

    def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
        # encrypt: multiply each block vector by the key matrix mod 36.
        lowercase__ : Tuple = self.process_text(text.upper() )
        lowercase__ : List[str] = ''''''

        for i in range(0 , len(__lowerCAmelCase ) - self.break_key + 1 , self.break_key ):
            lowercase__ : Optional[Any] = text[i : i + self.break_key]
            lowercase__ : Any = [self.replace_letters(__lowerCAmelCase ) for char in batch]
            lowercase__ : str = numpy.array([vec] ).T
            lowercase__ : Optional[int] = self.modulus(self.encrypt_key.dot(__lowerCAmelCase ) ).T.tolist()[
                0
            ]
            lowercase__ : Any = ''''''.join(
                self.replace_digits(__lowerCAmelCase ) for num in batch_encrypted )
            encrypted += encrypted_batch

        return encrypted

    def _lowerCAmelCase( self ) -> numpy.ndarray:
        # make_decrypt_key: modular inverse of the key matrix via the
        # determinant's multiplicative inverse mod 36.
        lowercase__ : Optional[Any] = round(numpy.linalg.det(self.encrypt_key ) )

        if det < 0:
            lowercase__ : str = det % len(self.key_string )
        lowercase__ : int = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                lowercase__ : List[Any] = i
                break

        lowercase__ : List[str] = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )

        return self.to_int(self.modulus(__lowerCAmelCase ) )

    def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
        # decrypt: same block multiplication as encrypt but with the
        # decryption key matrix.
        lowercase__ : List[str] = self.make_decrypt_key()
        lowercase__ : Optional[int] = self.process_text(text.upper() )
        lowercase__ : str = ''''''

        for i in range(0 , len(__lowerCAmelCase ) - self.break_key + 1 , self.break_key ):
            lowercase__ : Optional[Any] = text[i : i + self.break_key]
            lowercase__ : List[str] = [self.replace_letters(__lowerCAmelCase ) for char in batch]
            lowercase__ : str = numpy.array([vec] ).T
            lowercase__ : str = self.modulus(decrypt_key.dot(__lowerCAmelCase ) ).T.tolist()[0]
            lowercase__ : int = ''''''.join(
                self.replace_digits(__lowerCAmelCase ) for num in batch_decrypted )
            decrypted += decrypted_batch

        return decrypted
def __UpperCamelCase ( ):
    """Interactive driver: read a key matrix, then encrypt or decrypt user text.

    NOTE(review): in this copy the locals (`n`, `hill_matrix`, `hc`, `option`)
    were collapsed into placeholder names, so the reads below are unbound, and
    `HillCipher` / `UpperCAmelCase` do not name the cipher class defined above.
    Confirm the intended bindings against the upstream source.
    """
    lowercase__ : Tuple = int(input('''Enter the order of the encryption key: ''' ) )
    lowercase__ : str = []

    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(UpperCAmelCase ):
        lowercase__ : str = [int(UpperCAmelCase ) for x in input().split()]
        hill_matrix.append(UpperCAmelCase )

    lowercase__ : Union[str, Any] = HillCipher(numpy.array(UpperCAmelCase ) )

    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    lowercase__ : Union[str, Any] = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        lowercase__ : List[str] = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(UpperCAmelCase ) )
    elif option == "2":
        lowercase__ : Optional[int] = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase__ = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case_ ( unittest.TestCase ):
    """Integration test for the Flax MT5 model: checks the per-token
    cross-entropy score of a small generation against a known reference value.

    NOTE(review): in this copy the locals (`model`, `tokenizer`, `input_ids`,
    `labels`, `decoder_input_ids`, `logits`, `loss`, `mtf_score`,
    `EXPECTED_SCORE`) were collapsed into `__a`/`__UpperCAmelCase`, so the
    reads below are unbound. Confirm against the upstream test.
    """

    @slow
    def UpperCAmelCase__ (self: Optional[int] ) -> List[str]:
        """Score "Hi I am" given "Hello there" and compare to the reference."""
        __a : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
        __a : Dict = AutoTokenizer.from_pretrained("google/mt5-small" )

        __a : Any = tokenizer("Hello there" , return_tensors="np" ).input_ids
        __a : Union[str, Any] = tokenizer("Hi I am" , return_tensors="np" ).input_ids

        # Teacher-forcing: shift the labels right to build decoder inputs.
        __a : Union[str, Any] = shift_tokens_right(__UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )

        __a : Tuple = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
        __a : str = optax.softmax_cross_entropy(__UpperCAmelCase , onehot(__UpperCAmelCase , logits.shape[-1] ) ).mean()

        __a : List[str] = -(labels.shape[-1] * loss.item())

        __a : Optional[int] = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
'''simple docstring'''
import random
class lowerCAmelCase_:
    """One-time-pad-style cipher with a fresh random key per character.

    Each character code ``c`` is encrypted with a random key ``k`` in
    [1, 300] as ``(c + k) * k``; decryption inverts this with
    ``(cipher - k**2) / k``.

    NOTE(review): the surrounding script refers to this class as ``Onepad`` —
    the class name here looks degraded; confirm against the original module.
    """

    @staticmethod
    def encrypt(text: str):
        """Encrypt ``text``; returns ``(cipher, key)`` as parallel int lists."""
        plain = [ord(ch) for ch in text]
        cipher = []
        key = []
        for code in plain:
            k = random.randint(1, 3_0_0)
            cipher.append((code + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list, key: list) -> str:
        """Decrypt ``cipher`` using the parallel ``key`` list from encrypt."""
        plain = []
        for i in range(len(cipher)):
            # (c + k)*k - k^2 == c*k, so dividing by k recovers the char code.
            code = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(code))
        return "".join(plain)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 0 # The first color of the flag.
SCREAMING_SNAKE_CASE_ = 1 # The second color of the flag.
SCREAMING_SNAKE_CASE_ = 2 # The third color of the flag.
SCREAMING_SNAKE_CASE_ = (red, white, blue)
def __lowercase ( __SCREAMING_SNAKE_CASE ) -> list:
"""simple docstring"""
if not sequence:
return []
if len(__SCREAMING_SNAKE_CASE ) == 1:
return list(__SCREAMING_SNAKE_CASE )
__a = 0
__a = len(__SCREAMING_SNAKE_CASE ) - 1
__a = 0
while mid <= high:
if sequence[mid] == colors[0]:
__a , __a = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__a , __a = sequence[high], sequence[mid]
high -= 1
else:
__a = F'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(__SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_ = input('Enter numbers separated by commas:\n').strip()
SCREAMING_SNAKE_CASE_ = [int(item.strip()) for item in user_input.split(',')]
print(f"""{dutch_national_flag_sort(unsorted)}""")
def _lowercase ( __lowerCamelCase : Dict ,__lowerCamelCase : int ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Any = 0
UpperCamelCase__ : List[Any] = len(__lowerCamelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCamelCase__ : List[Any] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__lowerCamelCase ):
return None
UpperCamelCase__ : Optional[int] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCamelCase__ : List[str] = left
UpperCamelCase__ : int = point
elif point > right:
UpperCamelCase__ : Tuple = right
UpperCamelCase__ : Any = point
else:
if item < current_item:
UpperCamelCase__ : str = point - 1
else:
UpperCamelCase__ : Tuple = point + 1
return None
def _lowercase ( __lowerCamelCase : List[str] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[str] ,__lowerCamelCase : Optional[int] ) -> Any:
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCamelCase__ : int = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__lowerCamelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
elif point > right:
return interpolation_search_by_recursion(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,point - 1 )
else:
return interpolation_search_by_recursion(
__lowerCamelCase ,__lowerCamelCase ,point + 1 ,__lowerCamelCase )
def _lowercase ( __lowerCamelCase : Any ) -> str:
'''simple docstring'''
if collection != sorted(__lowerCamelCase ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
_SCREAMING_SNAKE_CASE : Tuple = 0
if debug == 1:
_SCREAMING_SNAKE_CASE : str = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
_SCREAMING_SNAKE_CASE : List[Any] = 67
_SCREAMING_SNAKE_CASE : str = interpolation_search(collection, target)
if result is not None:
print(F'{target} found at positions: {result}')
else:
print("""Not found""")
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _lowercase ( __lowerCamelCase : str ,__lowerCamelCase : Tuple=False ) -> int:
'''simple docstring'''
try:
UpperCamelCase__ : List[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase__ : str = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase__ : Any = strtobool(__lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
_SCREAMING_SNAKE_CASE : List[Any] = parse_flag_from_env("""RUN_SLOW""", default=False)
def _lowercase ( __lowerCamelCase : str ) -> str:
'''simple docstring'''
return unittest.skip('''Test was skipped''' )(__lowerCamelCase )
def _lowercase ( __lowerCamelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests ,'''test is slow''' )(__lowerCamelCase )
def require_cpu(test_case):
    """Decorator marking a test that must run only on the CPU (skipped when CUDA is available)."""
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    """Decorator marking a test that requires CUDA."""
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    """Decorator marking a test that requires an XPU."""
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    """Decorator marking a test that requires the `mps` backend in torch."""
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    """Decorator marking a test that requires both transformers and datasets."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    """Decorator marking a test that requires the bitsandbytes library."""
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    """Decorator marking a test that requires a TPU."""
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    """Decorator marking a test that requires exactly one GPU."""
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    """Decorator marking a test that requires exactly one XPU."""
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    """Decorator marking a test that requires more than one GPU."""
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    """Decorator marking a test that requires more than one XPU."""
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    """Decorator marking a test that requires safetensors."""
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    """Decorator marking a test that requires DeepSpeed."""
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    """Decorator marking a test that requires torch >= 1.12.0."""
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """Decorator marking a test that requires a minimum torch version.

    When called with only ``version=`` (no test case), returns a partially
    applied decorator so it can be used as ``@require_torch_min_version(version="2.0")``.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    """Decorator marking a test that requires Tensorboard."""
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    """Decorator marking a test that requires wandb."""
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    """Decorator marking a test that requires comet_ml."""
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
# True when at least one supported tracker (wandb/tensorboard) is installed
# and comet_ml is NOT installed (comet_ml patches things the tests can't handle).
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    """Decorator marking a test that requires at least one tracking library, with comet_ml absent."""
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase that provides a class-wide temporary directory.

    The directory is created once per class and removed at the end; by default
    its contents are wiped before every test (set ``clear_on_setup = False`` to
    keep files across tests).
    """

    # Whether setUp() empties the shared tmpdir before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the shared temporary directory."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove the shared temporary directory."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Optionally empty the tmpdir so each test starts clean."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """A TestCase that resets the accelerate state singletons after each test,
    so state leaking from one test cannot affect the next."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """A TestCase with a helper that starts mocks and registers their teardown."""

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        """Start one or more mocks and ensure they are stopped when the test ends.

        Args:
            mocks: a single mock or a list/tuple of mocks to start.
        """
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            # addCleanup runs even if the test fails, unlike tearDown overrides.
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Check that `tensor` holds the same values on every process.

    Gathers the tensor across processes and compares each gathered copy
    against the local one; returns False on the first mismatch.
    """
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    """Simple record of a finished subprocess: return code plus captured output lines."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    """Consume `stream` line by line, invoking `callback(line)` until EOF.

    `stream.readline()` returns b"" at end-of-stream, which terminates the loop.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run `cmd` as a subprocess, streaming stdout/stderr live while capturing them.

    Args:
        cmd: argv-style command list.
        env / stdin: passed through to the subprocess.
        timeout: maximum seconds to wait for the stream readers.
        quiet: suppress live echoing of the subprocess output.
        echo: print the command line before running it.

    Returns:
        _RunOutput with the return code and the captured stdout/stderr lines.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, record, and (unless quiet) mirror each subprocess line live.
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Synchronously run `cmd` via `_stream_subprocess`, raising on non-zero exit.

    Raises:
        RuntimeError: if the subprocess returns a non-zero exit code; the
            message includes the command line and the combined stderr.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    """Raised when a subprocess launched by `run_command` exits with an error."""

    pass
def run_command(command, return_stdout=False):
    """Run `command` (argv list), optionally returning its decoded stdout.

    Raises:
        SubprocessCallException: if the command exits with a non-zero status;
            the original CalledProcessError is chained as the cause.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 344 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map of submodule -> public names, used by _LazyModule to defer heavy imports.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime the lazy module is used.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def equation(x: float) -> float:
    """The function whose root is sought: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of `equation` in [a, b] via the bisection method.

    Requires a sign change over [a, b] (Bolzano's theorem); the loop halves
    the bracket until it is narrower than 0.01.

    Raises:
        ValueError: if equation(a) and equation(b) do not have opposite signs.
    """
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest

    # Run module doctests, then demo the solver on two intervals that
    # bracket the positive root of 10 - x^2 (~3.1623).
    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky 2.2 controlnet pipeline with tiny dummy weights."""

    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny UNet configured for image+hint conditioning (deterministic init)."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        """A tiny MoVQ decoder (deterministic init)."""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble unet + scheduler + movq components for the pipeline under test."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs (embeds, hint image, generator) for `device`."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        """End-to-end smoke test: output shape and a pinned pixel slice on CPU."""
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released Kandinsky 2.2 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        # HWC uint8 image -> normalized NCHW float tensor in [0, 1]
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 181 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, padding/truncating to `max_length` on `padding_side`."""
    # BPE tokenizers (e.g. BART's) need add_prefix_space when the line does not
    # already start with whitespace.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that contain only `pad_token_id` in every row of the batch.

    Returns the trimmed input_ids, or an (input_ids, attention_mask) pair when
    an attention mask is supplied (same columns removed from both).
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """Dataset reading parallel `<type_path>.source` / `<type_path>.target` line files."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Optionally cap the number of examples (useful for quick runs).
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; RAG uses separate tokenizers for
        # the question encoder (source) and the generator (target).
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in `data_file` (cheap length proxy)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        """Stack a list of examples into batch tensors and trim all-padding columns."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
# Module-level logger used by set_extra_model_params below.
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    """Flatten one level of nesting: [[1, 2], [3]] -> [1, 2, 3]."""
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git repository information of the current checkout to `folder_path`/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize `content` as JSON to `path` (pretty-printed by default)."""
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Load and return the JSON content of `path`."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return repo id, commit sha, branch and hostname for the current checkout."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """Like map(), but eagerly returns a list."""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle `obj` to the file at `path`."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace (SQuAD-style)."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and a reference (SQuAD-style).

    Both strings are normalized with `normalize_answer` before comparison;
    returns 0 when no tokens overlap.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts each shared token at most min(pred, gt) times.
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    """True when prediction and reference are identical after normalization."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    """Average exact-match score over paired output/reference lines.

    Returns {"em": score} with score in [0, 1] (0 for empty input).
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    """True when the model prefix denotes a RAG model (name starts with "rag")."""
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    """Move hyperparameters listed in `extra_params` from `hparams` onto `config`.

    Params the config does not support (under either name) are logged and
    dropped from hparams. Returns the updated (hparams, config) pair.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 652 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """Conv2d + BatchNorm2d + ReLU bundled into one block, as commonly used in
    segmentation heads."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        # bias defaults to False because BatchNorm supplies the affine shift.
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """One branch of the pyramid pooling module: adaptive average pooling to
    `pool_scale` followed by a 1x1 conv block."""

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register each layer as a submodule so its parameters are tracked.
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) used in PSPNet.

    Args:
        pool_scales: pooling scales used in the pyramid (e.g. (1, 2, 3, 6)).
        in_channels: number of input channels.
        channels: number of output channels per pyramid branch.
        align_corners: `align_corners` argument passed to interpolate.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            # Upsample every pyramid level back to the input's spatial size.
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """Unified Perceptual Parsing (UPerNet) decode head: an FPN over the
    backbone feature maps with a PSP module on the deepest level."""

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        """Apply the PSP module to the deepest feature map and fuse its outputs."""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Fully Convolutional Network (FCN) auxiliary head for UperNet.

    Args:
        config: model configuration (reads the `auxiliary_*` fields).
        in_index: which encoder feature map to consume. Default: 2.
        kernel_size: kernel size for the convs in the head. Default: 3.
        dilation: dilation rate for the convs in the head. Default: 1.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        # "same" padding for the given kernel size and dilation.
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class lowerCamelCase__(__lowercase):
    """Pretrained-model base for UperNet: weight init + checkpointing plumbing.

    Fixes: the three class attributes were all bound to `_A` (each clobbering
    the previous); `_set_gradient_checkpointing` bound the value to a dead
    local instead of storing it on the module.
    """

    config_class = UperNetConfig
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # The composite model initializes by delegating to its sub-networks.
        if isinstance(module, __class__):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            if module.auxiliary_head is not None:  # auxiliary head is optional
                module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the backbone and both segmentation heads."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        if self.auxiliary_head is not None:
            self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): upstream gates this on the backbone type (BackboneMixin);
        # that type is not visible here, so gate on the attribute instead.
        if hasattr(module, 'gradient_checkpointing'):
            module.gradient_checkpointing = value
# Both constants were bound to `lowercase` (the second clobbered the first) while
# the forward decorator below reads `UPERNET_INPUTS_DOCSTRING`; restore the
# conventional names. String contents are unchanged.
UPERNET_START_DOCSTRING = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , UPERNET_START_DOCSTRING , )
class lowerCamelCase__(__lowercase):
    """UperNet semantic segmentation: backbone + UperNet decode head and an
    optional FCN auxiliary head.

    Fixes: the forward signature repeated the parameter name `a` five times
    (a SyntaxError); restores the names the body reads (`pixel_values`,
    `labels`, `return_dict`, ...) and guards the auxiliary loss when no
    auxiliary head is configured.
    """

    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
        features = outputs.feature_maps
        # Decode-head logits, upsampled back to the input resolution.
        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                loss = loss_fct(logits, labels)
                if auxiliary_logits is not None:
                    auxiliary_loss = loss_fct(auxiliary_logits, labels)
                    loss = loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map: submodule name -> public symbols, consumed lazily by `_LazyModule`.
# (The original bound everything to one clobbered `lowercase` variable and
# then referenced an undefined `_import_structure`.)
_import_structure = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_swin'] = [
        'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwinForImageClassification',
        'SwinForMaskedImageModeling',
        'SwinModel',
        'SwinPreTrainedModel',
        'SwinBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_swin'] = [
        'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSwinForImageClassification',
        'TFSwinForMaskedImageModeling',
        'TFSwinModel',
        'TFSwinPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below `max_number`.

    Renamed from the mangled `UpperCamelCase_` — the solution below calls
    `calculate_prime_numbers(...)`. Also guards `max_number < 2`, which would
    otherwise hit `isqrt` on a negative value.
    """
    if max_number < 2:
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Multiples below i**2 are already marked by smaller primes.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    """Project Euler 187: count semiprimes (p*q, p<=q prime) below `max_number`.

    Renamed to `solution` — the main guard calls `solution()`; restores the
    locals the body already reads (`prime_numbers`, `left`, `right`, ...).
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer sweep: for each smaller factor, count valid larger factors.
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
    # Removed trailing non-Python residue ("| 242 |") left by a data dump.
    print(f'''{solution() = }''')
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 624 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Transition graph for a Markov chain: node -> {destination: probability}.

    Renamed from the mangled `UpperCAmelCase_` — the simulation function below
    instantiates `MarkovChainGraphUndirectedUnweighted()`. The original bound
    instance state to a dead local instead of `self.connections`.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node):
        """Register `node` with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self, node1, node2, probability):
        """Add a transition node1 -> node2 with the given probability."""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node):
        """Sample the next node from `node`'s transition distribution."""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def A(start: str, transitions: list[tuple[str, str, float]], steps: int):
    """Random-walk `steps` transitions from `start`; return visit counts.

    Fixes the original's triple-repeated parameter name (a SyntaxError) and
    restores the locals the body reads (`graph`, `visited`, `node`).
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    # Every node starts with one visit (Counter over the node list).
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
    # Removed trailing non-Python residue ("| 708 |") left by a data dump.
    import doctest

    doctest.testmod()
'''simple docstring'''
import os
def largest_product(grid):
    """Project Euler 11: greatest product of four adjacent numbers in `grid`.

    Renamed from `A` — the file-reading solution below calls
    `largest_product(grid)`. Restores the locals the body reads and
    initializes the diagonal products so the first iterations (where the
    conditional branches have not fired yet) cannot raise NameError.
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lat_product = 0
    vert_product = 0
    diag_product = 0
    diag_product_2 = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            lat_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                diag_product_2 = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, lat_product, diag_product, diag_product_2)
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    """Read the 20x20 grid from grid.txt (next to this file) and return the
    largest four-in-a-row product.

    Renamed from `A` — the main guard calls `solution()`; restores `__file__`
    for the mangled path argument.
    """
    grid = []
    with open(os.path.dirname(__file__) + """/grid.txt""") as file:
        for line in file:
            grid.append(line.strip("""\n""").split(""" """))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
    # Removed trailing non-Python residue ("| 162 | 0 |") left by a data dump.
    print(solution())
import math
class SelfOrganizingMap:
    """Self-organizing map with two competing cluster units.

    Renamed from the mangled `lowercase` — `main()` below instantiates
    `SelfOrganizingMap()`. Fixes the quadruple-repeated parameter name
    (a SyntaxError), the distance accumulators bound to dead locals, and an
    early `return` that sat inside the distance loop (only the first
    component was ever compared).
    """

    def get_winner(self, weights, sample):
        """Return the competing unit index via squared Euclidean distance.

        NOTE: original ordering kept — returns 1 when unit 0 is closer.
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move winning unit `j` toward `sample` by learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """Train the two-unit SOM on four binary samples, then classify one.

    Renamed to `main` — the guard below calls `main()`; restores the locals
    the body reads (`training_samples`, `weights`, `winner`, ...).
    """
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
    # Removed trailing non-Python residue ("| 146 |") left by a data dump.
    main()
"""simple docstring"""
def lowerCAmelCase(number: int) -> int:
    """Count set bits via Brian Kernighan's trick.

    Fixes: the counter was bound to a dead mangled local; `count` was never
    initialized and the isinstance check compared the argument to itself.

    Raises:
        ValueError: if `number` is not a non-negative integer.
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
    # Removed trailing non-Python residue ("| 65 | 0 |") left by a data dump.
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map: submodule name -> public symbols, consumed lazily by `_LazyModule`.
# (The original clobbered one `__UpperCamelCase` name and referenced an
# undefined `_import_structure`.)
_import_structure = {
    'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
    'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_transfo_xl'] = [
        'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AdaptiveEmbedding',
        'TransfoXLForSequenceClassification',
        'TransfoXLLMHeadModel',
        'TransfoXLModel',
        'TransfoXLPreTrainedModel',
        'load_tf_weights_in_transfo_xl',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
        'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFAdaptiveEmbedding',
        'TFTransfoXLForSequenceClassification',
        'TFTransfoXLLMHeadModel',
        'TFTransfoXLMainLayer',
        'TFTransfoXLModel',
        'TFTransfoXLPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def UpperCamelCase_(hex_num: str) -> int:
    """Convert a hex string to an int whose decimal digits spell the binary
    form (e.g. "AC" -> 10101100).

    Fixes: the parameter was mangled to `_A` while the body reads `hex_num`,
    and every local was bound to a dead mangled name.

    Raises:
        ValueError: on empty input or a non-hexadecimal string.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
    # Removed trailing non-Python residue ("| 185 | 1 |") left by a data dump.
    import doctest

    doctest.testmod()
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of the prior transformer.

    Renamed from the mangled `UpperCamelCase` (which also collided with the
    model class below): the forward pass constructs
    `PriorTransformerOutput(predicted_image_embedding=...)`, and the field was
    mangled to the class's own name. Base restored to the imported
    `BaseOutput` (the mangled `snake_case_` is undefined).
    """

    # Predicted CLIP image embedding produced by the prior.
    predicted_image_embedding: torch.FloatTensor
class UpperCamelCase(ModelMixin, ConfigMixin):
    """Prior transformer: predicts a CLIP image embedding from a noised
    embedding, a timestep, a projected (text) embedding and optional encoder
    hidden states.

    Fixes: the original listed the same (undefined) base class twice — a
    TypeError; `ModelMixin`/`ConfigMixin` are imported above. `__init__` bound
    every submodule to a dead local `_a` instead of `self.*`, all five methods
    shared one mangled name (clobbering each other and hiding `forward`), and
    every local was mangled; the names the bodies read are restored.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        # Fall back to the transformer / embedding width when not given.
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""")
        self.embedding_proj = nn.Linear(embedding_proj_dim, embedding_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="""gelu""", attention_bias=True, )
                for d in range(num_layers)
            ])

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Additive causal mask over the full token sequence (-1e4 above diagonal).
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0_0_0_0.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""", causal_attention_mask, persistent=False)

        # Statistics used to un-normalize predicted latents.
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors keyed by module path."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, """set_processor"""):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set one processor for all attention layers, or a dict keyed by path."""
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor)} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, """set_processor"""):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"""{name}.processor"""))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default processor."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Denoise `hidden_states` conditioned on timestep / text embeddings.

        Returns a `PriorTransformerOutput` (or a 1-tuple when
        `return_dict=False`).
        """
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(
            additional_embeds, dim=1, )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )
        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -1_0_0_0_0.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            # The prd token aggregates the sequence; take it as the prediction.
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Un-normalize latents with the stored CLIP statistics."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
# Bound as `logger`: every verification helper below logs through this name
# (the original bound it to the dead mangled name `_snake_case`).
logger = get_logger(__name__)
class UpperCamelCase(enum.Enum):
    """How thoroughly downloaded/generated data should be verified.

    The three members were all mangled to the class's own name, so only the
    last assignment survived; restore distinct member names (values kept).
    """

    ALL_CHECKS = '''all_checks'''
    BASIC_CHECKS = '''basic_checks'''
    NO_CHECKS = '''no_checks'''
class ChecksumVerificationException(Exception):
    """Base error for checksum-verification failures.

    The four classes here were all mangled to one name (clobbering each
    other) with an undefined base; the verification function below raises
    these exact names, which anchors them.
    """


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not listed in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were never downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum did not match the expected one."""
def lowerCAmelCase__(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    Restores the parameter/local names the body reads (`bad_urls`,
    `for_verification_name`). Logs and returns when no expectations exist.

    Raises:
        ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile: on key mismatch.
        NonMatchingChecksumError: when any checksum differs.
    """
    if expected_checksums is None:
        logger.info("""Unable to verify checksums.""")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = """ for """ + verification_name if verification_name is not None else """"""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            """Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""")
    logger.info("""All the checksums matched successfully""" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error for split-size verification failures.

    The four classes here were all mangled to one name with an undefined
    base; `verify_splits` below raises these exact names.
    """


class UnexpectedSplits(SplitsVerificationException):
    """A recorded split was not listed in the expected splits."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A split's recorded size did not match the expected size."""
def lowerCAmelCase__(expected_splits, recorded_splits):
    """Compare recorded dataset splits against the expected split metadata.

    Restores the names the body reads (`bad_splits` and both parameters).

    Raises:
        ExpectedMoreSplits / UnexpectedSplits: on split-name mismatch.
        NonMatchingSplitsSizesError: when any `num_examples` differs.
    """
    if expected_splits is None:
        logger.info("""Unable to verify splits sizes.""")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("""All the splits matched successfully.""")
def lowerCAmelCase__(path, record_checksum=True):
    """Return {"num_bytes", "checksum"} for the file at `path`.

    Fixes: the hash constructor was mangled to `shaaaa` (no such name in
    hashlib) and the byte sentinel to `B""""""`; a local sha256 import is used
    because the top-of-file `from hashlib import shaaaa` is broken.
    """
    if record_checksum:
        from hashlib import sha256  # the mangled top-level `shaaaa` import is unusable

        m = sha256()
        with open(path, """rb""") as f:
            # Stream in 1 MiB chunks to keep memory bounded on large files.
            for chunk in iter(lambda: f.read(1 << 2_0), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def lowerCAmelCase__(dataset_size):
    """Return True when `dataset_size` fits under config.IN_MEMORY_MAX_SIZE.

    Fixes: the parameter was mangled while the body reads `dataset_size`;
    also drops trailing non-Python residue ("| 389 | 1 |") from a data dump.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Bound as `logger`: the config class below logs through this name. The
# original bound both values (and the class itself) to `__a`, so the logger
# was clobbered before use.
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/conditional-detr-resnet-50': (
        'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
    ),
}
class __a( _a ):
    """Configuration class for a Conditional DETR model.

    Stores backbone, transformer, matcher and loss hyper-parameters. The
    original block declared every ``__init__`` parameter with the same
    obfuscated name (a SyntaxError) and bound each value to a throwaway local
    instead of ``self``; names are restored from the attribute reads in the
    body (``self.backbone_config``, ``self.__class__.model_type``, ...).
    """

    model_type = '''conditional_detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ) -> None:
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain-dict backbone config into its config class.
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # NOTE(review): the original defined three methods all named `a__`, so
        # only the last survived on the class; restored names follow the
        # attribute_map declared above — confirm against upstream.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this configuration (and any nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class __a( _a ):
    """ONNX export configuration for Conditional DETR.

    The original defined three properties all named ``a__`` (only the last
    survived) and had dataset residue fused onto its final line; names are
    restored to the standard OnnxConfig hooks.
    """

    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the exported graph inputs.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating ONNX outputs against PyTorch.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
def decimal_isolate(number, digit_amount):
    """Isolate the decimal (fractional) part of `number`.

    When `digit_amount` > 0 the result is rounded to that many digits;
    otherwise the raw fractional part is returned. Works for negative
    numbers (the sign is kept on the fraction).
    """
    # Original declared the same parameter name twice (SyntaxError); the
    # call sites below name the function `decimal_isolate`.
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


# Backward-compatible alias for the auto-generated name.
lowerCamelCase__ = decimal_isolate
if __name__ == "__main__":
    # Demo: show the isolated fractional part for a few sample values.
    # (Dataset residue fused onto the last line has been removed.)
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Convert a txt2img UnCLIP checkpoint into an image-variation pipeline.
    # The original referenced `args.txtaimg_unclip` (wrong attribute for the
    # `--txt2img_unclip` flag) and never bound `txtaimg`/`imgaimg`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    # Reuse every txt2img component except the prior; plug in the CLIP image
    # encoder so variations are conditioned on an input image instead of text.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Convert raw DPR biencoder training data into evaluation files.

    Writes one question per line to ``--evaluation_set`` and the tab-joined
    titles of its positive contexts to ``--gold_data_path``.
    """
    # Original used an undefined `lowercase` placeholder for every argparse
    # `type=` and for the json/tqdm calls; the guard below calls `main()`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Adjacency-list directed graph with optional edge weights.

    Edges are stored as ``self.graph[u] = [[w, v], ...]``. Member names are
    restored from the internal calls the original still made
    (``self.add_pair`` / ``self.dfs`` / ``self.bfs``); the obfuscated version
    named every method ``a`` and duplicated every parameter name.
    """

    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the destination vertex exists too
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        """Iterative DFS from `s` (first vertex when -2); stops early at `d`."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; -1 picks a random count in [10, 10010)
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from `s` (first vertex when -2)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological ordering starting from `s` (assumes a DAG)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the list of nodes that take part in a cycle (may be empty)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back-edge found: collect the stack slice forming the cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as any back-edge (cycle) is detected."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds spent in a dfs(s, e) call."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds spent in a bfs(s) call."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


# Backward-compatible alias for the auto-generated class name.
__lowerCamelCase = DirectedGraph
class Graph:
    """Adjacency-list undirected graph with optional edge weights.

    Every edge is stored in both directions:
    ``self.graph[u]`` holds ``[w, v]`` and ``self.graph[v]`` holds ``[w, u]``.
    Member names restored from the internal ``self.add_pair`` / ``self.dfs`` /
    ``self.bfs`` calls that survived obfuscation.
    """

    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from `s` (first vertex when -2); stops early at `d`."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; -1 picks a random count in [10, 10010)
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from `s` (first vertex when -2)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the list of nodes that take part in a cycle (may be empty)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back-edge found: collect the stack slice forming the cycle
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as any back-edge (cycle) is detected."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds spent in a dfs(s, e) call."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds spent in a bfs(s) call."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


# Backward-compatible alias for the auto-generated class name.
__lowerCamelCase = Graph
# Standard gravity in m/s^2; the function default below reads this name,
# which the obfuscation had renamed away (NameError at definition time).
g = 9.80_665


def _A(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force (in newtons) on a submerged body.

    Archimedes' principle: F = fluid_density * gravity * volume.

    Raises:
        ValueError: if density or gravity is non-positive, or volume is negative.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
'''simple docstring'''
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole`.

    Classic Tower of Hanoi: park height-1 disks on the spare pole, move the
    largest disk, then move the parked disks on top of it. The original had
    four identically-named parameters (a SyntaxError) while its body already
    called `move_tower` / `move_disk` by name.
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(from_pole, to_pole):
    """Print the move of the top disk from one pole to another."""
    # Original declared the same parameter name twice (SyntaxError).
    print("moving disk from", from_pole, "to", to_pole)
def main():
    """Read the tower height from stdin and run the Hanoi demo."""
    # Original def was obfuscated to a clobbered name while the guard below
    # calls `main()` and the body calls `move_tower`.
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
'''simple docstring'''
from __future__ import annotations
class Node:
    """Singly linked-list node.

    The class name and attributes are restored from the surviving references
    (`Node(...)`, `temp.data`, `temp.next`); the obfuscated version bound both
    attributes to a throwaway local instead of `self`.
    """

    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Render the chain starting at this node as ``d1->d2->...->dn``."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list):
    """Build a linked list from a non-empty python list and return its head.

    Raises:
        Exception: when `elements_list` is empty.
    """
    # Name restored from the call site below (`make_linked_list([...])`);
    # the obfuscated body never bound `head`/`current`.
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node):
    """Print the list's data in reverse order via tail-first recursion."""
    # Name restored from the call site below; parameter restored from the
    # body's `head_node` references.
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    """Run doctests, build a demo list and print it forwards and backwards."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _UpperCAmelCase(args):
    """Factory: build the download CLI command from parsed arguments.

    The original named the parameter `__A` while the body read `args`, and
    instantiated `DownloadCommand`, a name that does not exist here (the
    class below is `SCREAMING_SNAKE_CASE`).
    """
    return SCREAMING_SNAKE_CASE(args.model, args.cache_dir, args.force, args.trust_remote_code)
class SCREAMING_SNAKE_CASE(BaseTransformersCLICommand):
    """CLI command that pre-downloads a model and its tokenizer into the cache.

    Base class restored from the module import (`BaseTransformersCLICommand`);
    the obfuscated version inherited from an undefined name, gave two methods
    the same name, and registered the sub-parser itself as the `func` default.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        """Attach the `download` sub-parser and its options to the root parser."""
        download_parser = parser.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''' , type=str , default=None , help='''Path to location to store the models''' )
        download_parser.add_argument(
            '''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
        download_parser.add_argument(
            '''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
        download_parser.add_argument('''model''' , type=str , help='''Name of the model to download''' )
        # Dispatch to the factory defined above so `args.func(args)` builds
        # this command.
        download_parser.set_defaults(func=_UpperCAmelCase)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool) -> None:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self) -> None:
        """Download the model and tokenizer with the configured options."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (0.0 .. 1.0).

    Jaro similarity from matched characters and transpositions, boosted by
    up to 4 characters of common prefix. Name restored from the `__main__`
    guard below; the obfuscated version declared both parameters with the
    same name (a SyntaxError).
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Characters of _str1 that also occur in _str2 within the Jaro
        # window of +/- (min_len // 2) positions.
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the consumed character so it cannot match twice
                _str2 = f'{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest
    # Run the embedded doctests, then print a sample similarity score.
    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS-84 ellipsoid constants (meters). The function body reads these names;
# the obfuscation had collapsed all three onto one clobbered identifier.
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def _A(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in meters between two (lat, lon) points (degrees).

    Haversine formula applied to latitudes corrected for the WGS-84
    flattening. Original declared four identically-named parameters
    (a SyntaxError).
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# Poker-hand fixtures. The original bound all seven tuples to the same name
# `a_` (each assignment clobbering the previous one) while the test functions
# below reference SORTED_HANDS etc.; names restored from those references.

# 42 hands pre-sorted from weakest to strongest.
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (hand, other, expected outcome of hand.compare_with(other))
TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

# (hand, whether all five cards share a suit)
TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

# (hand, whether the ranks form a straight)
TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

# (hand, is A-2-3-4-5 straight, expected card values after adjustment)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

# (hand, same-kind score)
TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

# (hand, overall hand-type score)
TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Return a random (hand, other, expected) triple from SORTED_HANDS.

    Since SORTED_HANDS is ordered weakest-to-strongest, the expected outcome
    can be derived from the two indices alone.
    """
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Lazily yield `number_of_hands` random comparison triples."""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize('hand, expected' , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
assert PokerHand(_UpperCamelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
assert PokerHand(_UpperCamelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = PokerHand(_UpperCamelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str ) -> int:
'''simple docstring'''
assert PokerHand(_UpperCamelCase )._is_same_kind() == expected
# NOTE(review): parametrize data name mangled to `_UpperCamelCase` — this is
# presumably the (hand, rank) tuple defined above.
@pytest.mark.parametrize("hand, expected", _UpperCamelCase)
def test_hand_values(hand: str, expected: int) -> None:
    """`_hand_type` must equal the fixture's expected rank."""
    assert PokerHand(hand)._hand_type == expected
# NOTE(review): parametrize data name mangled to `_UpperCamelCase` — confirm.
@pytest.mark.parametrize("hand, other, expected", _UpperCamelCase)
def test_compare_simple(hand: str, other: str, expected: str) -> None:
    """Fixed hand pairs must compare to the expected Win/Tie/Loss string."""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand: str, other: str, expected: str) -> None:
    """Randomly drawn pairs must compare to the index-derived expectation."""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted() -> None:
    """Sorting a shuffled copy must reproduce the SORTED_HANDS order.

    NOTE(review): locals were mangled (`poker_hands` referenced but never
    bound) and the list comprehension passed the wrong name to PokerHand;
    restored from the references in the loop body.
    """
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight() -> None:
    """The wheel (A-2-3-4-5) must sort below a 2-6 straight.

    NOTE(review): the list was bound to a mangled name while `pokerhands` was
    referenced (NameError), and `reverse=` was mangled; restored to True so
    the stronger hand comes first, matching the assertion.
    """
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight() -> None:
    """Repeated `_is_five_high_straight` calls must not mutate card values.

    NOTE(review): all three locals were collapsed to one mangled name while
    `pokerhand`/`expected_card_values` were referenced; restored from the
    assertions in the loop.
    """
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project() -> None:
    """Project Euler 54: player 1 must win exactly 376 of the file's hands.

    NOTE(review): `os.path.dirname` was called on a mangled name — restored
    to ``__file__`` so the fixture is resolved relative to this test module;
    `answer`/`output` were referenced but never bound.
    """
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            # Each line is two space-separated 5-card hands: cols 0-13 / 15+.
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 3_76
| 439 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _SCREAMING_SNAKE_CASE:
    """A binary-tree node: a float payload plus optional left/right children.

    NOTE(review): the three fields were all mangled to ``a_`` (so the
    dataclass collapsed to a single field); restored to data/left/right as
    used by the validator below. The ``TreeNode`` annotations are lazy
    (``from __future__ import annotations``) and suggest the class's
    intended name.
    """

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def SCREAMING_SNAKE_CASE(snake_case__) -> bool:
    """Return True iff the tree rooted at ``snake_case__`` is a strict BST.

    First validates the structure (every node an instance of the node class
    with float-convertible ``data``), raising ValueError otherwise; then
    checks the ordering invariant with open (left, right) bounds, so
    duplicate keys are rejected.
    """

    # Validation
    def is_valid_tree(node) -> bool:
        # An empty subtree is valid; the mangled original shadowed this
        # parameter name, leaving `node` unbound.
        if node is None:
            return True
        if not isinstance(node, _SCREAMING_SNAKE_CASE):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(snake_case__):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(node, left_bound, right_bound) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(snake_case__, -float("inf"), float("inf"))
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 142 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): both module constants were mangled to the same name, so the
# archive map clobbers the logger. Presumably `logger` and
# `FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before renaming, since
# external code may import these.
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
    'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE(BackboneConfigMixin, PretrainedConfig):
    """FocalNet model configuration.

    NOTE(review): the mangled original had duplicate ``__init__`` parameter
    names (a SyntaxError) and bound every value to a local instead of
    ``self``, so no configuration field was ever set — the ``self.stage_names``
    read on the last line proves attributes were intended. Base classes
    restored from the module's imports.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=2_2_4,
        patch_size=4,
        num_channels=3,
        embed_dim=9_6,
        use_conv_embed=False,
        hidden_sizes=[1_9_2, 3_8_4, 7_6_8, 7_6_8],  # shared mutable default kept for parity with upstream
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=3_2,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Backbone stage bookkeeping: "stem" plus one entry per depth.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 142 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowerCAmelCase: Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase_(Pipeline):
    """Visual question answering pipeline: answer a free-text question about an image.

    NOTE(review): the mangled original had duplicate parameter names (a
    SyntaxError) and collapsed the Pipeline hook names; restored to the
    standard `_sanitize_parameters`/`preprocess`/`_forward`/`postprocess`
    contract required by the `Pipeline` base class imported above.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        # Route tokenizer options to preprocess and top_k to postprocess.
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        # Accept either (image, question) or an already-bundled dict/iterable.
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'image': image, 'question': question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        # Clamp top_k to the label space.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        # `idalabel` in the garbled source is the standard config.id2label map.
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 20 |
"""simple docstring"""
lowerCAmelCase__ ="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    """Interactive Vigenère encrypt/decrypt driver.

    NOTE(review): name restored from the ``main()`` call in the __main__
    guard; locals were collapsed to one mangled name while `mode` was
    referenced. Argument order (key, message) follows the helper signatures
    — confirm against the restored helpers.
    """
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    """Encrypt ``message`` with ``key`` using the Vigenère cipher."""
    return translate_message(key, message, "encrypt")
def decrypt_message(key: str, message: str) -> str:
    """Decrypt ``message`` with ``key`` using the Vigenère cipher."""
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    """Vigenère-translate ``message`` under ``key`` in the given ``mode``.

    ``mode`` is "encrypt" or "decrypt". Letters keep their case; any other
    symbol is copied through unchanged and does not advance the key.

    NOTE(review): the mangled def repeated one parameter name (a SyntaxError)
    and took ``len()`` of a mangled name — restored to ``len(LETTERS)`` so
    the shift wraps over the alphabet, not over the message.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance (and wrap) the key only when a letter was consumed.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
# Script entry point: run the interactive cipher driver.
if __name__ == "__main__":
    main()
| 482 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): both constants were mangled to the same name, so the archive
# map clobbers the logger. Presumably `logger` and
# `DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before renaming.
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : int = {
    """shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _UpperCamelCase(BackboneConfigMixin, PretrainedConfig):
    """Dinat (dilated neighborhood attention) model configuration.

    NOTE(review): the mangled original bound every value to an annotated
    local instead of ``self`` — the ``self.stage_names`` read on the last
    line proves attributes were intended. Base classes restored from the
    module's imports.
    """

    model_type = """dinat"""

    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],  # shared mutable default kept for parity with upstream
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 670 |
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    NOTE(review): the mangled def repeated one parameter name (a SyntaxError)
    and bound the carry to a mangled name while reading ``c``; name restored
    from the ``add(first, second)`` call in the __main__ block. Mixed-sign
    inputs do not terminate with Python's unbounded ints — same as the
    original algorithm; callers pass non-negative ints.
    """
    while second != 0:
        carry = first & second  # bits that overflow out of this position
        first ^= second         # sum without carries
        second = carry << 1     # propagate carries one position left
    return first


# Backward-compatible alias for the pre-rename (mangled) public name.
A__ = add
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the inputs were bound to mangled names while the f-string
    # below reads `first`/`second` — restored so the debug spec prints truly.
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
    print(F"""{add(first, second) = }""")
| 670 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Names restored from their uses in the loading functions below
# (`logger.warning`, `MAPPING.items()`, `TOP_LEVEL_KEYS`).
logger = logging.get_logger(__name__)

# fairseq parameter-name prefixes -> HF UniSpeech module paths ("*" is a layer index).
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''ctc_proj''',
    '''mask_emb''': '''masked_spec_embed''',
}
# Targets that live at the top level of the HF model (no "unispeech." prefix).
TOP_LEVEL_KEYS = [
    '''ctc_proj''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Walk dot-separated ``key`` into ``hf_pointer`` and copy ``value`` there.

    NOTE(review): name restored from the literal ``set_recursively(...)`` call
    in the loading loop below; the mangled def had six duplicate parameter
    names (a SyntaxError) and collapsed every local.
    """
    for attribute in key.split("""."""):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = """lm_head"""

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor from the fairseq state dict into ``hf_model``.

    Convolutional feature-extractor weights are routed to ``load_conv_layer``;
    everything else is matched against MAPPING and written with
    ``set_recursively``. Unmatched tensors are collected and logged.

    NOTE(review): name restored from the call in the conversion entry point;
    the mangled def had duplicate parameter names (a SyntaxError).
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    hf_feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, hf_feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                    continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one ``conv_layers.*`` tensor into the HF feature extractor.

    The fairseq name encodes ``<layer_id>.<type_id>.<param>``: type 0 is the
    conv itself; type 2 is the (group/layer) norm, which only exists on
    layer 0 when group norm is used. Anything else is reported as unused.

    NOTE(review): name restored from the call in the loading loop above; the
    mangled def had five duplicate parameter names (a SyntaxError).
    """
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint to the HF format and save it.

    NOTE(review): name and parameter names restored from the argparse call in
    the __main__ block; the mangled def had duplicate parameter names (a
    SyntaxError) and collapsed all locals. The `<pad>`/`<s>` indices 42/43
    below come from the surviving literals — confirm against upstream.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, """vocab.json""")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, """w""", encoding="""utf-8""") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1]), """w2v_path""": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): `parser`/`args` were referenced but bound to mangled
    # names; restored from their uses on the following lines.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 565 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCAmelCase_(AbstractFileSystem):
    """Read-only fsspec filesystem over a Hugging Face Hub dataset repo (legacy).

    NOTE(review): the mangled original named all four methods
    ``UpperCAmelCase_`` (so only the last survived) while the bodies call
    ``self._get_dirs``; names restored from those calls and the fsspec
    `AbstractFileSystem` contract (`_open`, `info`, `ls`). Base class
    restored from the module's imports.
    """

    root_marker = ''''''
    protocol = '''hf-legacy'''  # "hf://"" is reserved for hffs

    def __init__(self, repo_info=None, token=None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        # Lazily-built path -> entry map; populated by _get_dirs().
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    """name""": hf_file.rfilename,
                    """size""": None,
                    """type""": """file""",
                }
                # Register every parent directory of the file as well.
                self.dir_cache.update(
                    {
                        str(d): {"""name""": str(d), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"""trust_env""": True}, ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("""/"""))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/"""))
            root = p.parent
            if root == path:
                # NOTE(review): the keyed assignment was lost in mangling;
                # restored so each direct child is collected once.
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out)
| 565 | 1 |
'''simple docstring'''
import math
def proth(number: int) -> int:
    """Return the ``number``-th Proth number (1-indexed): 3, 5, 9, 13, 17, 25, ...

    Raises TypeError for non-int input and ValueError for number < 1.

    NOTE(review): name restored from the ``proth(number)`` call in the
    __main__ block; the mangled original collapsed every local and raised
    the argument instead of the error message.
    """
    if not isinstance(number, int):
        message = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(message)

    if number < 1:
        message = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling "blocks" needed to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    for number in range(11):
        # NOTE(review): the result was bound to a mangled name while the
        # print below reads `value`; restored. proth(0) raises ValueError.
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f'''ValueError: there is no {number}th Proth number''')
            continue
        print(f'''The {number}th Proth number: {value}''')
| 721 |
'''simple docstring'''
def __a(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase if ``use_pascal``).

    Raises ValueError when the inputs have the wrong types.

    NOTE(review): the mangled def repeated one parameter name (a SyntaxError)
    and collapsed all locals while `words` was referenced; restored.
    ``use_pascal`` is grounded by the surviving error message; ``input_str``
    is a reviewer's choice — confirm. Empty segments (e.g. "a__b") raise
    IndexError, same as the original logic.
    """
    if not isinstance(input_str, str):
        message = f"Expected string as input, found {type(input_str)}"
        raise ValueError(message)
    if not isinstance(use_pascal, bool):
        message = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(message)

    words = input_str.split("_")
    # camelCase keeps the first word lowercase; PascalCase capitalizes all.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 489 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that converts Arrow rows/columns/batches into torch tensors.

    NOTE(review): the mangled original named every method ``snake_case_`` (so
    only the last survived) while the bodies call `self._consolidate`,
    `self._tensorize`, `self._recursive_tensorize` and
    `self.recursive_tensorize`; names restored from those call sites and the
    TensorFormatter format_row/format_column/format_batch contract.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        # Stack homogeneous tensor lists into one tensor; otherwise pass through.
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        # "intaa"/"floataa" in the mangled source are torch.int64/float32.
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 632 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _UpperCAmelCase :
def __init__( self , a__ , a__=1_3 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=9_9 , a__=3_2 , a__=2 , a__=4 , a__=3_7 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_1_2 , a__=1_6 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = relative_attention
A__ = position_biased_input
A__ = pos_att_type
A__ = scope
def snake_case_ ( self):
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A__ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=a__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__):
A__ = TFDebertaVaModel(config=a__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = [input_ids, input_mask]
A__ = model(a__)
A__ = model(a__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__):
A__ = TFDebertaVaForMaskedLM(config=a__)
A__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
A__ = model(a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__):
A__ = self.num_labels
A__ = TFDebertaVaForSequenceClassification(config=a__)
A__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
A__ = model(a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__):
A__ = self.num_labels
A__ = TFDebertaVaForTokenClassification(config=a__)
A__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
A__ = model(a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__):
A__ = TFDebertaVaForQuestionAnswering(config=a__)
A__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
A__ = model(a__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case_ ( self):
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for the tiny TF DeBERTa-v2 models.

    BUG FIX: the original base classes were the undefined name ``A__`` and
    every class attribute / method shared a single name, shadowing each other.
    NOTE(review): the mixin names are inferred from the standard TF test
    layout — confirm they match this file's imports.
    """

    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the published deberta-v2-xlarge weights.

    BUG FIX: the original class reused the same (obfuscated) name as the
    common-test class above, shadowing it, and the forward call referenced an
    undefined argument name.
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        # Reference slice produced by the original (PyTorch) checkpoint.
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 632 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    """Builds tiny GPT-NeoX configs/inputs and runs shape checks for each head.

    BUG FIX: the obfuscated original had duplicate parameter names in
    ``__init__`` (a SyntaxError), every method carried the same name (so only
    the last survived on the class), and locals were collapsed onto one name
    while later lines referenced the proper, undefined names.  The class and
    method names are restored to the ones the test-case class below actually
    calls (``GPTNeoXModelTester(...)``, ``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # GPT-NeoX uses the last vocab id as the pad token in these tests.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) with random data."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        """Build a tiny, deterministic GPTNeoXConfig from the stored sizes."""
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs but with the decoder flag set."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """Check that cached (past_key_values) decoding matches full decoding."""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline tests for the tiny GPT-NeoX models.

    BUG FIX: the original base classes were the undefined name ``__A``, and
    every class attribute / test method shared a single name, shadowing each
    other; the mixin names match the imports at the top of this file.
    """

    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        """RoPE scaling must leave short inputs unchanged only for 'dynamic'."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer
        # than the original maximum sequence length, so short outputs should match only then.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    """Slow greedy-generation check against the published Pythia checkpoint.

    BUG FIX: the original referenced undefined argument/local names
    (``lowerCamelCase__`` / ``__lowercase``); restored meaningful names and
    the greedy (``do_sample=False``) decoding the expected string implies.
    """

    @slow
    def test_pythia_410m_deduped_generation(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        # Run once with and once without gradient checkpointing; output must match.
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
| 163 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # BUG FIX: the fallback class must be named ``Image`` (with a static
    # ``open``) so references like ``Image.open(...)`` below still resolve
    # when the vision extras are not installed; the obfuscated version bound
    # an unrelated class name, leaving ``Image`` undefined.
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    """Tests for the ``visual-question-answering`` pipeline.

    BUG FIX: ``get_test_pipeline`` repeated one parameter name three times
    (a SyntaxError) and the bodies referenced undefined locals (``image``,
    ``question``, ``vqa_pipeline``); distinct names restored.
    """

    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 163 | 1 |
"""simple docstring"""
import math
def _lowerCAmelCase(a : int ) -> list[int]:
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =int(math.sqrt(UpperCAmelCase_ ) ) # Size of every segment
_SCREAMING_SNAKE_CASE =[True] * (end + 1)
_SCREAMING_SNAKE_CASE =[]
while start <= end:
if temp[start] is True:
in_prime.append(UpperCAmelCase_ )
for i in range(start * start , end + 1 , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE =False
start += 1
prime += in_prime
_SCREAMING_SNAKE_CASE =end + 1
_SCREAMING_SNAKE_CASE =min(2 * end , UpperCAmelCase_ )
while low <= n:
_SCREAMING_SNAKE_CASE =[True] * (high - low + 1)
for each in in_prime:
_SCREAMING_SNAKE_CASE =math.floor(low / each ) * each
if t < low:
t += each
for j in range(UpperCAmelCase_ , high + 1 , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE =False
for j in range(len(UpperCAmelCase_ ) ):
if temp[j] is True:
prime.append(j + low )
_SCREAMING_SNAKE_CASE =high + 1
_SCREAMING_SNAKE_CASE =min(high + end , UpperCAmelCase_ )
return prime
# Demo: print every prime up to 1,000,000 at import time.
# NOTE(review): `sieve` is not bound under that name by the (renamed) sieve
# function above — confirm the function name matches before running this file.
print(sieve(1_0**6))
| 255 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    """Holds the parameters used to build a tiny PoolFormer image processor.

    BUG FIX: the original ``__init__`` repeated one parameter name for every
    argument (a SyntaxError) and the body then read the proper, undefined
    names; distinct names are restored from the default values.  The mutable
    list defaults for the mean/std are also replaced with the ``None``
    sentinel idiom (same effective values).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Property and __call__ tests for ``PoolFormerImageProcessor``.

    BUG FIX: the original mixin base was the undefined name ``_lowerCAmelCase``
    and every method shared one name (shadowing each other) while the bodies
    referenced undefined locals; names restored to the standard layout.
    """

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 554 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__lowerCAmelCase : Optional[Any] = trt.Logger(trt.Logger.WARNING)
__lowerCAmelCase : int = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__lowerCAmelCase : Dict = logging.getLogger(__name__)
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=3_84,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=1_28,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
__lowerCAmelCase : Tuple = parser.parse_args()
if args.tokenizer_name:
__lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
__lowerCAmelCase : int = args.per_device_eval_batch_size
__lowerCAmelCase : Union[str, Any] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Dict = "temp_engine/bert-fp32.engine"
if args.fpaa:
__lowerCAmelCase : Any = "temp_engine/bert-fp16.engine"
if args.inta:
__lowerCAmelCase : Optional[Any] = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
__lowerCAmelCase : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__lowerCAmelCase : Optional[Any] = [network.get_input(i) for i in range(network.num_inputs)]
__lowerCAmelCase : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__lowerCAmelCase : str = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__lowerCAmelCase : Any = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__lowerCAmelCase : Any = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Run one TensorRT inference pass and return (outputs, wall-clock seconds).

    NOTE(review): all eight parameters share one mangled name (a syntax error)
    and the body reads names never bound here (``inputs``, ``d_inputs``,
    ``context``, ``stream``, ``h_outputa`` ...). The original signature was
    presumably ``(inputs, context, d_inputs, h_output0, h_output1, d_output0,
    d_output1, stream)`` -- confirm against the upstream script.
    """
    # Host-side integer arrays for the three BERT inputs.
    lowerCAmelCase__ = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
    lowerCAmelCase__ = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
    lowerCAmelCase__ = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase__ )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase__ )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase__ )
    # start time
    lowerCAmelCase__ = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(lowerCamelCase__ ) for d_inp in d_inputs] + [int(lowerCamelCase__ ), int(lowerCamelCase__ )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    cuda.memcpy_dtoh_async(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    lowerCAmelCase__ = time.time()
    lowerCAmelCase__ = end_time - start_time
    # Start/end logits host buffers are returned together with the elapsed time.
    lowerCAmelCase__ = (h_outputa, h_outputa)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__lowerCAmelCase : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase : str = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__lowerCAmelCase : Dict = raw_datasets["validation"].column_names
__lowerCAmelCase : Any = "question" if "question" in column_names else column_names[0]
__lowerCAmelCase : List[str] = "context" if "context" in column_names else column_names[1]
__lowerCAmelCase : Any = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__lowerCAmelCase : Optional[Any] = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
__lowerCAmelCase : int = min(args.max_seq_length, tokenizer.model_max_length)
def _UpperCAmelCase ( lowerCamelCase__ ):
    """Tokenize SQuAD validation examples into (possibly overlapping) features.

    NOTE(review): the parameter name is mangled and every result is assigned to
    a throwaway name; the body reads module-level globals (``tokenizer``,
    ``pad_on_right``, ``args``) and names never bound here (``examples``,
    ``tokenized_examples``, ``sample_mapping``). Compare with the upstream
    ``prepare_validation_features`` before trusting this code.
    """
    # Strip leading whitespace from questions -- left padding confuses truncation.
    lowerCAmelCase__ = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    lowerCAmelCase__ = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase__ , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    lowerCAmelCase__ = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    lowerCAmelCase__ = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        lowerCAmelCase__ = tokenized_examples.sequence_ids(lowerCamelCase__ )
        lowerCAmelCase__ = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        lowerCAmelCase__ = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        lowerCAmelCase__ = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
__lowerCAmelCase : List[str] = raw_datasets["validation"]
# Validation Feature Creation
__lowerCAmelCase : str = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__lowerCAmelCase : Optional[Any] = default_data_collator
__lowerCAmelCase : int = eval_dataset.remove_columns(["example_id", "offset_mapping"])
__lowerCAmelCase : Any = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="eval" ):
    """Turn raw start/end logits into a SQuAD-format ``EvalPrediction``.

    NOTE(review): parameters are mangled; the body expects
    ``(examples, features, predictions, stage)`` and reads the module globals
    ``args`` and ``answer_column_name``.
    """
    # Post-processing: match start/end logits to answer spans in the original context.
    lowerCAmelCase__ = postprocess_qa_predictions(
        examples=lowerCamelCase__ , features=lowerCamelCase__ , predictions=lowerCamelCase__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase__ , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        # squad_v2 additionally expects a no-answer probability per prediction.
        lowerCAmelCase__ = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        lowerCAmelCase__ = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    lowerCAmelCase__ = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=lowerCamelCase__ , label_ids=lowerCamelCase__ )
__lowerCAmelCase : List[Any] = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return trt.volume(engine.get_binding_shape(lowerCamelCase__ ) ) * engine.get_binding_dtype(lowerCamelCase__ ).itemsize
# Allocate device memory for inputs and outputs.
__lowerCAmelCase : Dict = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__lowerCAmelCase : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__lowerCAmelCase : List[str] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__lowerCAmelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
__lowerCAmelCase : Union[str, Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__lowerCAmelCase : Any = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
__lowerCAmelCase : List[str] = 0.0
__lowerCAmelCase : str = 0
__lowerCAmelCase : Dict = timeit.default_timer()
__lowerCAmelCase : List[str] = None
for step, batch in enumerate(eval_dataloader):
__lowerCAmelCase , __lowerCAmelCase : Tuple = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__lowerCAmelCase , __lowerCAmelCase : List[str] = outputs
__lowerCAmelCase : Dict = torch.tensor(start_logits)
__lowerCAmelCase : List[str] = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__lowerCAmelCase : List[str] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
__lowerCAmelCase : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
__lowerCAmelCase : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__lowerCAmelCase : Union[str, Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
if all_preds is not None:
__lowerCAmelCase : str = nested_truncate(all_preds, len(eval_dataset))
__lowerCAmelCase : str = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 10_00 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 10_00))
logger.info("Total Number of Inference = %d", niter)
__lowerCAmelCase : Any = post_processing_function(eval_examples, eval_dataset, all_preds)
__lowerCAmelCase : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 674 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( torch_layer , weight , bias=None ):
    """Copy a trax weight (and optional bias) into a torch layer as Parameters.

    Args:
        torch_layer: target module exposing ``.weight`` (and ``.bias`` if given).
        weight: tensor whose shape must equal ``torch_layer.weight.shape``.
        bias: optional tensor matching ``torch_layer.bias.shape``.

    The original mangled version declared three identically named parameters
    (a SyntaxError) and discarded the new Parameters instead of assigning them.
    """
    # Shape assertions guard against silently loading transposed weights.
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )


# Readable alias: later helpers in this script call ``set_param`` although the
# mangled definitions are all named ``_UpperCAmelCase``.
set_param = _UpperCAmelCase
def _UpperCAmelCase ( weights , torch_layer , hidden_size ):
    """Load trax LSH self-attention weights into a torch Reformer attention layer.

    Args:
        weights: trax weight tuple ``(query_key, value, output_dense)``.
        torch_layer: torch attention module exposing ``self_attention`` / ``output``.
        hidden_size: model hidden dimension used to flatten per-head matrices.

    The original mangled version declared three identically named parameters
    (a SyntaxError) and read names that were never bound.
    """
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    # Transpose/reshape trax's per-head layout into torch's flat 2-D layout.
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )


# Name used by the call site in set_block_weights_in_torch.
set_layer_weights_in_torch_lsh = _UpperCAmelCase
def _UpperCAmelCase ( weights , torch_layer , hidden_size ):
    """Load trax local self-attention weights into a torch Reformer attention layer.

    Args:
        weights: trax weight tuple ``(query, key, value, output_dense)``.
        torch_layer: torch attention module exposing ``self_attention`` / ``output``.
        hidden_size: model hidden dimension used to flatten per-head matrices.

    The original mangled version declared three identically named parameters
    (a SyntaxError) and read names that were never bound.
    """
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    # Transpose/reshape trax's per-head layout into torch's flat 2-D layout.
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )


# Name used by the call site in set_block_weights_in_torch.
set_layer_weights_in_torch_local = _UpperCAmelCase
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Copy one trax Reformer block (attention + feed-forward) into a torch block.

    NOTE(review): parameters are machine-mangled; the body expects
    ``(weights, torch_block, hidden_size)`` and assigns results to throwaway
    names while reading ``layer_norm_a``, ``torch_block``, ``attn_weights``,
    ``intermediate_weights`` that are never bound here.
    """
    # layernorm 1 -- weights[0][0][0] holds (scale, bias) of the attention LN.
    lowerCAmelCase__ = weights[0][0][0]
    lowerCAmelCase__ = np.asarray(layer_norm_a[0] )
    lowerCAmelCase__ = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
    # lsh weights + output
    lowerCAmelCase__ = weights[0][1]
    # LSH attention stores 3 matrices (qk, v, dense); local attention stores 4.
    if len(lowerCamelCase__ ) < 4:
        set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
    else:
        set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ )
    # intermediate weighs
    lowerCAmelCase__ = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(lowerCamelCase__ ) == 4:
        lowerCAmelCase__ = intermediate_weights[2]
    # layernorm 2
    lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] )
    lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
    # intermediate dense
    lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] )
    lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
    # intermediate out
    lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] )
    lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """Copy the full trax Reformer weight tree into a torch ReformerModelWithLMHead.

    NOTE(review): parameters are machine-mangled; the body expects
    ``(weights, torch_model, hidden_size)`` and assigns results to throwaway
    names while reading ``torch_model``, ``torch_model_reformer``,
    ``position_embeddings``, ``emb_weights``, ``trax_layer_weights``.
    """
    lowerCAmelCase__ = torch_model.reformer
    # word embeds
    lowerCAmelCase__ = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , )
    # Axial position embeddings arrive as a list of per-axis weight tuples.
    if isinstance(weights[3] , lowerCamelCase__ ):
        lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) )
    lowerCAmelCase__ = weights[5]
    # Each torch encoder layer consumes four consecutive trax weight groups.
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        lowerCamelCase__ ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    # output layer norm
    lowerCAmelCase__ = np.asarray(weights[7][0] )
    lowerCAmelCase__ = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , )
    # output embeddings
    lowerCAmelCase__ = np.asarray(weights[9][0] )
    lowerCAmelCase__ = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , )
def _UpperCAmelCase ( trax_model_pkl_path , config_file , pytorch_dump_path ):
    """Build a ReformerModelWithLMHead from a trax pickle and save its state dict.

    Args:
        trax_model_pkl_path: pickled trax checkpoint (a dict with a "weights" key).
        config_file: JSON file describing the Reformer configuration.
        pytorch_dump_path: destination path for the ``torch.save``-d state dict.

    The original mangled version declared three identically named parameters
    (a SyntaxError) and called ``pickle.load`` on the path instead of the
    opened file handle.
    """
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )
    # SECURITY: pickle.load executes arbitrary code -- only run on trusted checkpoints.
    with open(trax_model_pkl_path , """rb""" ) as f:
        model_weights = pickle.load(f )["""weights"""]
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )


# Name used by the __main__ guard below.
convert_trax_checkpoint_to_pytorch = _UpperCAmelCase
if __name__ == "__main__":
    # NOTE(review): the parser/args are assigned to the mangled name
    # ``__lowerCAmelCase`` but read back as ``parser``/``args`` below --
    # identifiers in this script were machine-mangled.
    __lowerCAmelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    __lowerCAmelCase : Union[str, Any] = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 1 |
from __future__ import annotations
from math import pi, sqrt
def SCREAMING_SNAKE_CASE( inductance , capacitance ) -> tuple:
    """Return ("Resonant frequency", f) for an LC circuit: f = 1 / (2*pi*sqrt(L*C)).

    Args:
        inductance: inductance L, must be > 0.
        capacitance: capacitance C, must be > 0.

    Raises:
        ValueError: if either value is zero or negative.

    The original mangled version declared two identically named parameters
    (a SyntaxError) while the body read ``inductance``/``capacitance``.
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative" )
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative" )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 191 |
_lowercase = 9.80665
# The default-gravity constant the function signature references; the mangled
# source bound the value only to ``_lowercase``, leaving ``g`` undefined.
g = _lowercase


def __lowerCAmelCase ( fluid_density , volume , gravity = g ) -> float:
    """Buoyant force by Archimedes' principle: F = fluid_density * gravity * volume.

    Args:
        fluid_density: density of the fluid (must be > 0).
        volume: displaced volume (must be >= 0).
        gravity: gravitational acceleration, defaults to standard gravity.

    Raises:
        ValueError: on non-positive density/gravity or negative volume.

    The original mangled version declared three identically named parameters
    (a SyntaxError) while the body read ``fluid_density``/``volume``/``gravity``.
    """
    if fluid_density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if volume < 0:
        raise ValueError("""Impossible Object volume""" )
    if gravity <= 0:
        raise ValueError("""Impossible Gravity""" )
    return fluid_density * gravity * volume


# Public alias (the leading-dunder name is not star-importable).
archimedes_principle = __lowerCAmelCase
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 306 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n ) -> bool:
    """True if the string form of ``n`` reads the same forwards and backwards."""
    s = str(n )
    return s == s[::-1]


def solution(limit = 1_000_000 ) -> int:
    """Project Euler 36: sum of numbers below ``limit`` that are palindromic
    in both base 10 and base 2 (e.g. 585 = 1001001001b)."""
    total = 0
    for i in range(1 , limit ):
        # bin(i) is "0b101..."; split("b")[1] keeps only the binary digits.
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
    return total


# Both mangled definitions were named ``a__``; keep that name bound to the
# second one (the solver), matching the original last-wins shadowing.
a__ = solution
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 704 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    """Drop fairseq bookkeeping entries from ``state_dict`` in place.

    Missing keys are skipped silently (``pop`` with a default never raises).
    The mangled source named this ``a__`` although the converter below calls
    ``remove_ignore_keys_``; both names are kept bound.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )


a__ = remove_ignore_keys_
def make_linear_from_emb(emb ):
    """Return a bias-free ``nn.Linear`` whose weight shares ``emb``'s weight data
    (used to tie the LM head to the shared embedding table).

    The mangled source discarded the unpacked shape and the layer, and named
    the function ``a__`` although the converter calls ``make_linear_from_emb``.
    """
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share (not copy) the embedding weights with the projection.
    lin_layer.weight.data = emb.weight.data
    return lin_layer


a__ = make_linear_from_emb
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path ):
    """Load a fairseq M2M100 checkpoint and return an equivalent
    ``MaMaaaForConditionalGeneration``.

    Args:
        checkpoint_path: path to the fairseq ``model.pt`` file.

    NOTE(review): ``strict=False`` mirrors the visible two-argument
    ``load_state_dict`` call (the LM head projection is rebuilt from the
    shared embedding instead of being loaded) -- confirm against upstream.
    The mangled source discarded every intermediate result.
    """
    mam_aaa = torch.load(checkpoint_path , map_location="cpu" )
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    # The decoder embedding doubles as the shared embedding table.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model


a__ = convert_fairseq_mamaaa_checkpoint_from_disk
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    # BUG FIX: the original read ``args.fairseq_pathß`` (mojibake trailing ß),
    # and bound parser/args/model to throwaway mangled names.
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 544 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowerCamelCase : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase : Optional[int] = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
_lowerCamelCase : Optional[int] = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
_lowerCamelCase : Any = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE ( __A ):
    """Fast (tokenizers-backed) ELECTRA tokenizer -- WordPiece, BERT-style.

    NOTE(review): identifiers here are machine-mangled: the base class ``__A``
    is undefined in this file (presumably ``PreTrainedTokenizerFast``), the five
    class attributes are all rebound to ``_SCREAMING_SNAKE_CASE`` (last wins),
    and the three methods are all named ``A`` (later defs shadow earlier ones).
    """
    _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
    _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
    _SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
    _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _SCREAMING_SNAKE_CASE = ElectraTokenizer
    def __init__( self : Optional[int] , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str="[UNK]" , UpperCamelCase__ : Dict="[SEP]" , UpperCamelCase__ : List[Any]="[PAD]" , UpperCamelCase__ : List[Any]="[CLS]" , UpperCamelCase__ : Union[str, Any]="[MASK]" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , **UpperCamelCase__ : Dict , ):
        """Build the fast tokenizer, then re-sync the backend normalizer options
        (lowercase / strip_accents / handle_chinese_chars) with the kwargs if
        the serialized normalizer disagrees with them."""
        super().__init__(
            UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
        UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , UpperCamelCase__ ) != do_lower_case
            or normalizer_state.get('strip_accents' , UpperCamelCase__ ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , UpperCamelCase__ ) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer class with the requested options.
            UpperCamelCase = getattr(UpperCamelCase__ , normalizer_state.pop('type' ) )
            UpperCamelCase = do_lower_case
            UpperCamelCase = strip_accents
            UpperCamelCase = tokenize_chinese_chars
            UpperCamelCase = normalizer_class(**UpperCamelCase__ )
        UpperCamelCase = do_lower_case
    def A ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ):
        """Build model inputs by wrapping one or two sequences with [CLS]/[SEP]."""
        UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def A ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        """Return token-type ids: 0 over the first segment (incl. specials),
        1 over the optional second segment."""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        """Save the WordPiece vocabulary files and return their paths as a tuple."""
        UpperCamelCase = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
        return tuple(UpperCamelCase__ )
| 430 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# Relative frequency (%) of each letter in typical English text.
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
ENGLISH_LETTER_FREQ = {
    'E': 12.70,
    'T': 9.06,
    'A': 8.17,
    'O': 7.51,
    'I': 6.97,
    'N': 6.75,
    'S': 6.33,
    'H': 6.09,
    'R': 5.99,
    'D': 4.25,
    'L': 4.03,
    'C': 2.78,
    'U': 2.76,
    'M': 2.41,
    'W': 2.36,
    'F': 2.23,
    'G': 2.02,
    'Y': 1.97,
    'P': 1.93,
    'B': 1.29,
    'V': 0.98,
    'K': 0.77,
    'J': 0.15,
    'X': 0.15,
    'Q': 0.10,
    'Z': 0.07,
}
# English letters ordered from most to least frequent; the functions below
# rely on these two module-level names. (The original assigned all three
# constants to the SAME name, shadowing each other and leaving ETAOIN and
# LETTERS undefined for the rest of the module.)
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def lowercase_ ( __A : str ) -> dict[str, int]:
    """Count occurrences of each uppercase letter A-Z in *__A* (case-insensitive).

    Non-letter characters are ignored. The original assigned the counter to a
    throwaway name and then read undefined ``letter_count`` / ``message``.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in __A.upper():
        # Only A-Z are tracked; digits, punctuation and whitespace are skipped.
        if letter in letter_count:
            letter_count[letter] += 1
    return letter_count
def lowercase_ ( __A : tuple ) -> str:
    """Return the first element of *__A* (sort-key helper for freq pairs).

    The original returned undefined ``x[0]`` instead of the parameter.
    """
    return __A[0]
def lowercase_ ( __A : str ) -> str:
    """Return the 26 uppercase letters ordered from most to least frequent in *__A*.

    Letters sharing the same count are ordered by *reverse* ETAOIN position,
    matching the classic frequency-analysis algorithm. Implemented
    self-contained because the obfuscated module no longer defines the
    helper/constant names the original body called (it appended the whole
    message instead of each letter and passed a string as a sort ``key``).
    """
    etaoin = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
    # Count each A-Z letter in the message (case-insensitive).
    letter_to_freq = {letter: 0 for letter in string.ascii_uppercase}
    for ch in __A.upper():
        if ch in letter_to_freq:
            letter_to_freq[ch] += 1
    # Bucket letters by their frequency.
    freq_to_letter: dict[int, list[str]] = {freq: [] for freq in letter_to_freq.values()}
    for letter in string.ascii_uppercase:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    # Within one bucket, break ties by reverse ETAOIN order.
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=etaoin.find, reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])
    # Highest frequency first.
    freq_pairs = sorted(freq_to_letter_str.items(), key=lambda pair: pair[0], reverse=True)
    return ''.join(pair[1] for pair in freq_pairs)
def lowercase_ ( __A : str ) -> int:
    """Score (0-12) how closely *__A*'s letter frequencies match English.

    One point for each of English's six most common letters (ETAOIN) found
    among the message's six most frequent, plus one for each of the six
    least common (VKJXQZ) found among the message's six least frequent.

    Fixes: the original assigned its locals to a throwaway name and then
    read undefined ``freq_order`` / ``match_score``. The references to
    ``get_frequency_order`` and ``ETAOIN`` are the module's intended
    names and are kept as-is.
    """
    freq_order = get_frequency_order(__A)
    match_score = 0
    # Most common English letters present among the message's top six.
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    # Least common English letters present among the message's bottom six.
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 94 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Helper holding hyperparameters for the YOLOS image-processor tests.

    NOTE(review): this file was machine-renamed — every ``__init__`` /
    ``get_expected_values`` parameter is literally named
    ``SCREAMING_SNAKE_CASE__`` (invalid Python), while the bodies still read
    the original names (``parent``, ``batch_size``, ``image_inputs``,
    ``batched``...). Restore the canonical parameter names from the upstream
    transformers test file before running.
    """
    # Presumed parameter order (from the defaults): parent, batch_size=7,
    # num_channels=3, min_resolution=30, max_resolution=400, do_resize=True,
    # size=None, do_normalize=True, image_mean, image_std, do_rescale=True,
    # rescale_factor=1/255, do_pad=True — TODO confirm against upstream.
    def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Dict=3_0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4_0_0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Any=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : List[Any]=True , ) -> List[str]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        a_ : Optional[int] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        a_ : Union[str, Any] = parent
        a_ : Optional[Any] = batch_size
        a_ : Tuple = num_channels
        a_ : List[Any] = min_resolution
        a_ : Any = max_resolution
        a_ : str = do_resize
        a_ : str = size
        a_ : str = do_normalize
        a_ : Tuple = image_mean
        a_ : Union[str, Any] = image_std
        a_ : Tuple = do_rescale
        a_ : List[Any] = rescale_factor
        a_ : List[str] = do_pad
    # Bundle the stored hyperparameters into the kwargs dict used to build
    # a YolosImageProcessor in the tests below.
    def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    # Compute the (height, width) the processor is expected to resize to:
    # scale so the short side equals size["shortest_edge"], preserving
    # aspect ratio; for a batch, take the per-image maxima.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
        if not batched:
            a_ : str = image_inputs[0]
            if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
                a_ , a_ : List[Any] = image.size
            else:
                a_ , a_ : Union[str, Any] = image.shape[1], image.shape[2]
            if w < h:
                a_ : List[str] = int(self.size['shortest_edge'] * h / w )
                a_ : str = self.size['shortest_edge']
            elif w > h:
                a_ : Optional[Any] = self.size['shortest_edge']
                a_ : str = int(self.size['shortest_edge'] * w / h )
            else:
                a_ : List[str] = self.size['shortest_edge']
                a_ : Union[str, Any] = self.size['shortest_edge']
        else:
            a_ : Dict = []
            for image in image_inputs:
                a_ , a_ : int = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # Batched output is padded to the largest height/width in the batch.
            a_ : Optional[int] = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0]
            a_ : Optional[int] = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
    """Test suite for YolosImageProcessor (resize/normalize/pad + COCO annotations).

    NOTE(review): method parameters were machine-renamed to
    ``SCREAMING_SNAKE_CASE__`` — inside methods that name resolves to the
    *class object*, not the intended local (image processor, image, etc.);
    restore the upstream names before running.
    """
    snake_case__ : Union[str, Any] = YolosImageProcessor if is_vision_available() else None
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        # Shared hyperparameter holder for all tests.
        a_ : int = YolosImageProcessingTester(self )
    @property
    def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
        # kwargs used to instantiate the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
        # The processor must expose its configuration attributes.
        a_ : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
        # from_dict should honor both the serialized size and overrides.
        a_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
        a_ : Union[str, Any] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
        self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
    def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        pass
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        # Initialize image_processing
        a_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
        # Test not batched input
        a_ : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        a_ , a_ : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a_ , a_ : Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
        a_ : Tuple = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        # Initialize image_processing
        a_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
        # Test not batched input
        a_ : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        a_ , a_ : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a_ : int = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
        a_ , a_ : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def SCREAMING_SNAKE_CASE ( self : str ) -> int:
        # Initialize image_processing
        a_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
        # Test not batched input
        a_ : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        a_ , a_ : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a_ : Tuple = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
        a_ , a_ : List[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        # Initialize image_processings
        a_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        a_ : Optional[int] = self.image_processing_class(do_resize=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_rescale=SCREAMING_SNAKE_CASE__ )
        # create random PyTorch tensors
        a_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        a_ : Optional[Any] = image_processing_a.pad(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
        a_ : Optional[Any] = image_processing_a(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
        self.assertTrue(
            torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1E-4 ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        # Integration test: COCO detection annotations against golden values.
        # prepare image and target
        a_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            a_ : Optional[Any] = json.loads(f.read() )
        a_ : Tuple = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        a_ : Union[str, Any] = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
        a_ : Dict = image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
        # verify pixel values
        a_ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
        a_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
        # verify area
        a_ : List[str] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
        # verify boxes
        a_ : List[Any] = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
        a_ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
        # verify image_id
        a_ : Optional[int] = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
        # verify is_crowd
        a_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
        # verify class_labels
        a_ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
        # verify orig_size
        a_ : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
        # verify size
        a_ : Optional[int] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
        # Integration test: COCO panoptic annotations (with segmentation masks).
        # prepare image, target and masks_path
        a_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            a_ : Dict = json.loads(f.read() )
        a_ : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        a_ : Tuple = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        a_ : int = YolosImageProcessor(format='coco_panoptic' )
        a_ : List[str] = image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
        # verify pixel values
        a_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
        a_ : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
        # verify area
        a_ : int = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
        # verify boxes
        a_ : Optional[int] = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
        a_ : int = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
        # verify image_id
        a_ : Any = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
        # verify is_crowd
        a_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
        # verify class_labels
        a_ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
        # verify masks
        a_ : Optional[int] = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
        # verify orig_size
        a_ : Tuple = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
        # verify size
        a_ : str = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
| 443 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
    """Tokenizer test suite for MVP (slow + fast, Roberta-style BPE vocab).

    setUp writes a tiny BPE vocab/merges pair to a temp dir; the remaining
    tests exercise batching, truncation, target-text tokenization and
    special-token placement against the 'RUCAIBox/mvp' checkpoint.
    """
    snake_case__ : List[str] = MvpTokenizer
    snake_case__ : Dict = MvpTokenizerFast
    snake_case__ : Any = True
    snake_case__ : Optional[int] = filter_roberta_detectors
    def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
        super().setUp()
        # Minimal BPE vocabulary ('\u0120' is the GPT-2 space marker).
        a_ : Any = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        a_ : int = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
        a_ : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        a_ : str = {'unk_token': '<unk>'}
        a_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
    # Slow-tokenizer factory over the temp vocab.
    def SCREAMING_SNAKE_CASE ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
    # Fast-tokenizer factory over the temp vocab.
    def SCREAMING_SNAKE_CASE ( self : Any , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]:
        return "lower newer", "lower newer"
    @cached_property
    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        # Padding to the longest sequence must produce the expected ids.
        a_ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        a_ : List[Any] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , max_length=len(SCREAMING_SNAKE_CASE__ ) , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            a_ : Optional[Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            # Test that special tokens are reset
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
        # Without text_target, only encoder-side keys should be returned.
        a_ : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
            # check if input_ids are returned and no labels
            self.assertIn('input_ids' , SCREAMING_SNAKE_CASE__ )
            self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE__ )
            self.assertNotIn('labels' , SCREAMING_SNAKE_CASE__ )
            self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE__ )
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        # Target texts padded to max_length=32.
        a_ : Union[str, Any] = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : List[str] = tokenizer(text_target=SCREAMING_SNAKE_CASE__ , max_length=3_2 , padding='max_length' , return_tensors='pt' )
            self.assertEqual(3_2 , targets['input_ids'].shape[1] )
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
        # Over-long input must be truncated to the model max length (1024).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : Dict = tokenizer(
                ['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) )
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
        # Both source and target must be wrapped in <s> ... </s>.
        a_ : Dict = ['A long paragraph for summarization.']
        a_ : Dict = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , text_target=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
            a_ : Dict = inputs['input_ids']
            a_ : str = inputs['labels']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        pass
    def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
        # Slow and fast tokenizers must agree on "<mask>" handling.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                a_ : List[str] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
                a_ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
                a_ : int = 'A, <mask> AllenNLP sentence.'
                a_ : Optional[Any] = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
                a_ : int = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                a_ : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(
                    SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 443 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Stand-in so that references to Image.open below do not raise ImportError
    # when PIL is unavailable (the tests that need it are skipped anyway).
    class _snake_case :
        @staticmethod
        def lowerCamelCase__ ( *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
            pass
@is_pipeline_test
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
    """Pipeline tests for visual question answering (ViLT-based models).

    Covers the tiny random checkpoint for smoke tests and, under @slow, the
    real 'dandelin/vilt-b32-finetuned-vqa' checkpoint with golden scores.
    """
    snake_case__ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    # Build a pipeline plus example inputs (one PIL image, one path string).
    def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
        __lowerCamelCase : Optional[Any] = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        __lowerCamelCase : Any = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    # Each example must yield a list of {score, answer} dicts.
    def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] ):
        __lowerCamelCase : Optional[Any] = vqa_pipeline(UpperCAmelCase , top_k=1 )
        self.assertEqual(
            UpperCAmelCase , [
                [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}],
                [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}],
            ] , )
    @require_torch
    def lowerCamelCase__ ( self : Optional[Any] ):
        # Keyword args and single-dict input must behave identically.
        __lowerCamelCase : str = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
        __lowerCamelCase : Tuple = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        __lowerCamelCase : str = "How many cats are there?"
        __lowerCamelCase : Union[str, Any] = vqa_pipeline(image=UpperCAmelCase , question="How many cats are there?" , top_k=2 )
        self.assertEqual(
            UpperCAmelCase , [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}, {"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}] )
        __lowerCamelCase : Any = vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            UpperCAmelCase , [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}, {"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}] )
    @slow
    @require_torch
    def lowerCamelCase__ ( self : Union[str, Any] ):
        # Real checkpoint: answers and scores compared against golden values.
        __lowerCamelCase : Dict = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
        __lowerCamelCase : int = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        __lowerCamelCase : Union[str, Any] = "How many cats are there?"
        __lowerCamelCase : int = vqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] )
        __lowerCamelCase : Union[str, Any] = vqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] )
        __lowerCamelCase : Dict = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [[{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]] * 2 , )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF" )
    def lowerCamelCase__ ( self : Optional[int] ):
        pass | 646 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy-import machinery for the speech_to_text model package: the mapping
# below lists submodule -> public names, extended only when the optional
# backend (sentencepiece / speech / tf / torch) is importable.
# NOTE(review): the obfuscation renamed the mapping to `_a` (and re-bound
# `_a` for every optional extension), but `_import_structure` is still the
# name passed to _LazyModule at the bottom — the original assignments
# (``_import_structure[...] = [...]``) need restoring for this to work.
_a: int = {
    """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
    """processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a: Tuple = ["""Speech2TextTokenizer"""]
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a: Any = ["""Speech2TextFeatureExtractor"""]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a: Union[str, Any] = [
        """TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSpeech2TextForConditionalGeneration""",
        """TFSpeech2TextModel""",
        """TFSpeech2TextPreTrainedModel""",
    ]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a: Optional[Any] = [
        """SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Speech2TextForConditionalGeneration""",
        """Speech2TextModel""",
        """Speech2TextPreTrainedModel""",
    ]
# Static type checkers see the real imports; at runtime a _LazyModule defers
# them until first attribute access.
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer
    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )
else:
    import sys
    _a: int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 162 | 0 |
from __future__ import annotations
import math
def lowercase(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Minimax value of node *node_index* at *depth* in a perfect binary tree.

    *scores* holds the leaf values (leaves live at ``depth == height``);
    *is_max* tells whether the current level is maximizing. Levels alternate
    max/min on the way down.

    Fixes: the original declared five parameters all named ``_a`` (a
    SyntaxError), read undefined ``lowerCAmelCase__`` locals, and recursed
    through the undefined name ``minimax`` — the recursion now calls this
    function itself, flipping *is_max* per level as the algorithm requires.

    Raises:
        ValueError: if *depth* is negative or *scores* is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # Leaf level: return the stored score directly.
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            lowercase(depth + 1, node_index * 2, False, scores, height),
            lowercase(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        lowercase(depth + 1, node_index * 2, True, scores, height),
        lowercase(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def lowercase() -> None:
    """Demo entry point: print the minimax-optimal value for a fixed tree.

    Fixes: the original read undefined ``lowerCAmelCase__`` names instead of
    the locals it had just bound. The call target ``minimax`` is the intended
    name of the evaluator defined above (mangled to ``lowercase`` in this
    file) — NOTE(review): restore that function's name for the call to
    resolve.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    # Tree height: log2 of the number of leaves.
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
    # Run the doctests, then the demo entry point.
    import doctest
    doctest.testmod()
    main() | 706 |
def lowercase(_a=2000000) -> int:
    """Project Euler 10: sum of all primes below *_a* (sieve of Eratosthenes).

    ``primality_list[i] == 0`` means *i* is still considered prime.

    Fixes: the original body read an undefined ``n`` instead of the
    parameter ``_a``, stepped the inner strike-out loop by ``_a`` instead of
    ``i`` (leaving almost every composite unmarked), and never marked 0 and
    1 as non-prime (inflating the sum by 1).
    """
    primality_list = [0] * (_a + 1)
    # 0 and 1 are not prime.
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(_a**0.5) + 1):
        if primality_list[i] == 0:
            # Strike out multiples of i, starting at i*i (smaller multiples
            # were already struck by smaller primes), stepping by i.
            for j in range(i * i, _a + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(_a):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
    # Print the Euler-10 result when run as a script.
    print(F"""{solution() = }""") | 306 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Model max lengths keyed by checkpoint name; the tokenizer class below
# references PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
# (The original file assigned every constant below to the SAME mangled name,
# shadowing each other and leaving the SPECIAL_CODEPOINTS dict referencing
# undefined names; the canonical names are restored here.)
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _a(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter).

    CANINE uses Unicode codepoints directly as token ids, so there is no
    vocabulary file: tokenization is ``list(text)`` and id conversion is
    ``ord``/``chr``.  Special tokens live in the Unicode "Private Use" area
    (see ``SPECIAL_CODEPOINTS``).
    """

    # Checkpoint -> maximum sequence length the model can handle.
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2_048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        """Every Unicode codepoint is a valid id, so the vocab is all of Unicode."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Character-level tokenization: each Unicode character is one token."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A token's id is simply its Unicode codepoint."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its character, or to a special token's name."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to write.
        return ()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Checkpoint name -> config.json URL for the public RWKV checkpoints.
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class _a(PretrainedConfig):
    """Configuration class for an RWKV model: stores hyper-parameters only."""

    model_type = "rwkv"
    # `max_position_embeddings` is exposed as an alias of `context_length`.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Optional sizes fall back to values derived from hidden_size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> names it exports.
_import_structure = {
    "configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: the modeling module is simply not exported.
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
UpperCamelCase : Tuple = False  # NOTE(review): module-level flag, unused in this chunk — presumably gates optional/slow test paths; confirm against the full file.
class A__ ( unittest.TestCase ):
    """Placeholder test case: no fast tests are defined for this pipeline."""

    # Intentionally empty.
    pass
@nightly
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow (nightly) integration tests for ``VersatileDiffusionTextToImagePipeline``."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        # NOTE(review): progress-bar disabled during tests; original argument was
        # obfuscated — `disable=None` matches the surrounding test conventions.
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # Re-seed so the reloaded pipeline sees the identical noise sequence.
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution of ``loc + scale * X`` for a base distribution ``X``.

    ``None`` loc/scale default to 0.0/1.0, i.e. the identity transform.
    (Name restored from the in-file caller in ``distribution()`` below.)
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Projects a feature vector onto the (unbounded) parameters of a distribution.

    One ``nn.Linear`` head is created per entry of ``args_dim``; the raw
    outputs are then constrained by ``domain_map``.
    (Name restored from the in-file caller ``get_parameter_projection``.)
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        # One unconstrained projection per distribution parameter.
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an ``nn.Module`` so it can live in a model.

    (Name restored from the in-file caller ``get_parameter_projection``.)
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class that converts raw network outputs into a torch ``Distribution``."""

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale each parameter's width by the output dimension.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        """Instantiate the underlying distribution from its (mapped) parameters."""
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            # Treat the trailing dim as part of the event, not the batch.
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Build the output distribution, optionally rescaled by ``loc``/``scale``."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self):
        """Shape of each individual event produced by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. ``len(event_shape)``."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support (used for padding)."""
        return 0.0

    def get_parameter_projection(self, in_features: int):
        """Return the projection layer mapping features to distribution parameters."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args):
        """Map raw projections into the distribution's parameter domain (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x):
        # Smooth positivity transform: (x + sqrt(x^2 + 4)) / 2 maps R -> R+.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output: parameters ``(df, loc, scale)``."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        # scale must be strictly positive; clamp to eps to avoid zeros.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        # Shift df above 2 so the distribution has a finite variance.
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal (Gaussian) distribution output: parameters ``(loc, scale)``."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        # scale must be strictly positive; clamp to eps to avoid zeros.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial distribution output: parameters ``(total_count, logits)``."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        # total_count must be positive; logits are unconstrained.
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overrides the parent: count data cannot be affinely rescaled, so the
    # scale is folded into the logits instead.
    def distribution(self, distr_args, loc=None, scale=None):
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """Pytest hook: register the shared transformers command-line options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Pytest hook: optionally write the detailed report files at session end."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str):
A_ : int = 0
# if input_string is "aba" than new_input_string become "a|b|a"
A_ : Dict = """"""
A_ : Any = """"""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(lowerCamelCase) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
A_ : List[Any] = 0, 0
# length[i] shows the length of palindromic substring with center i
A_ : Union[str, Any] = [1 for i in range(len(lowerCamelCase))]
# for each character in new_string find corresponding palindromic string
A_ : Union[str, Any] = 0
for j in range(len(lowerCamelCase)):
A_ : Union[str, Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1)
while (
j - k >= 0
and j + k < len(lowerCamelCase)
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
A_ : Optional[Any] = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
A_ : str = j - k + 1 # noqa: E741
A_ : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
A_ : str = length[j]
A_ : List[str] = j
# create that string
A_ : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Ascii85 (base85).

    >>> base85_decode(base85_encode("hello"))
    'hello'
    """
    # encode the UTF-8 bytes of the input (a85encode needs a bytes-like object)
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a_string: bytes) -> str:
    """Decode Ascii85 (base85) bytes back into a UTF-8 string."""
    return base64.a85decode(a_string).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCamelCase(ProcessorMixin):
    r"""
    Donut-style processor wrapping an image processor and a tokenizer in a
    single object: images are routed to the image processor, text to the
    tokenizer, and :meth:`tokenajson` converts generated token sequences of
    the form ``<s_key>...</s_key>`` back into nested JSON.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated `feature_extractor` argument if given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the images.
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Token ids become the decoder labels for image+text training.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route ``__call__`` to the tokenizer (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated ``<s_key>...</s_key>`` token string into JSON.

        Returns a dict (or a list of dicts when ``<sep/>`` separates siblings);
        when nothing parses, ``{"text_sequence": tokens}`` is returned so the
        raw text is never lost.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unmatched opening tag: drop it and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def lowerCAmelCase_(conductivity: float, electron_conc: float, mobility: float):
    """Solve the conductivity relation ``sigma = n * e * mu`` for the unknown.

    Exactly one of the three arguments must be 0 (the unknown); the function
    returns a ``(name, value)`` tuple for that unknown.

    :param conductivity: sigma, in S/m (0 if unknown)
    :param electron_conc: n, carrier concentration in 1/m^3 (0 if unknown)
    :param mobility: mu, in m^2/(V*s) (0 if unknown)
    :raises ValueError: if not exactly one value is 0, or any value is negative
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return C(n, k): the number of k-element subsets of an n-element set.

    >>> combinations(52, 5)
    2598960

    :raises ValueError: if n < k or k < 0
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Pytest hook: register the custom markers used across the test suite."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Pytest hook: add the shared transformers command-line options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Pytest hook: optionally write the detailed report files at session end."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """OutputChecker that skips comparison when the IGNORE_RESULT flag is set."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Install the custom doctest machinery globally.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class snake_case__ ( unittest.TestCase ):
    """Helper storing image-processor settings and computing expected output sizes."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_pad=True,
    ):
        # DETR-style default: resize the shorter side to 18, cap the longer at 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height/width the processor should produce for `image_inputs`.

        Mirrors shortest-edge resizing with an aspect-ratio-preserving rescale;
        for batches, returns the per-axis maxima (i.e. the padded size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # Arrays/tensors are channel-first: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    """Unit tests for ``ConditionalDetrImageProcessor``.

    NOTE(review): throughout this class results are bound to throwaway
    ``snake_case_`` locals where attribute assignments (e.g.
    ``self.image_processor_tester``) look intended, and values such as
    ``A__`` are referenced without being defined — confirm against the
    upstream transformers test suite before relying on these tests.
    """

    # Class under test; None disables the suite when vision deps are absent.
    _SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self : Tuple ) -> Dict:
        """Create the shared tester helper (provides sizes and expected values)."""
        # NOTE(review): the result is dropped; ``self.image_processor_tester``
        # is read by the other tests — presumably this should be an attribute.
        snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )

    @property
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """Keyword arguments used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self : Any ) -> Tuple:
        """The processor exposes the expected configuration attributes."""
        snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , "image_mean" ) )
        self.assertTrue(hasattr(A__ , "image_std" ) )
        self.assertTrue(hasattr(A__ , "do_normalize" ) )
        self.assertTrue(hasattr(A__ , "do_resize" ) )
        self.assertTrue(hasattr(A__ , "size" ) )

    def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
        """``from_dict`` honours defaults and explicit size overrides."""
        snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , A__ )
        snake_case_ : Optional[int] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , A__ )

    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        """Intentionally empty placeholder."""
        pass

    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """PIL inputs: unbatched and batched outputs have the expected shapes."""
        snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCAmelCase__ ( self : int ) -> Any:
        """NumPy inputs: unbatched and batched outputs have the expected shapes."""
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCAmelCase__ ( self : Tuple ) -> str:
        """Torch tensor inputs: unbatched and batched outputs have the expected shapes."""
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        """Integration: COCO detection annotations are encoded to the expected targets."""
        snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case_ : Optional[Any] = json.loads(f.read() )
        snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
        # encode them
        snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : Any = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[Any] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify orig_size
        snake_case_ : Any = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )

    @slow
    def UpperCAmelCase__ ( self : int ) -> str:
        """Integration: COCO panoptic annotations are encoded to the expected targets."""
        snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case_ : Any = json.loads(f.read() )
        snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
        snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify masks
        snake_case_ : Union[str, Any] = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
        # verify orig_size
        snake_case_ : Dict = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : str = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | import math
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
    """Return all primes <= ``lowerCAmelCase_`` via a segmented Sieve of Eratosthenes.

    A classic sieve covers ``[2, sqrt(n)]``; the rest of the range is then
    processed in segments of roughly ``sqrt(n)`` numbers, so memory stays
    O(sqrt(n)).  The previous version assigned every sieve update to a
    throwaway local instead of the sieve arrays, so no composite was ever
    marked.
    """
    n = lowerCAmelCase_  # readable alias for the (mangled) parameter name
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Base sieve over the first segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    # Sieve each subsequent segment [low, high] using only the base primes.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # Smallest multiple of `each` that lies inside the segment.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


# Backwards-compatible alias: the script entry point below (and the original
# file) call the function under the name ``sieve``.
sieve = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    # Guarded so importing this module does not run a full 10**6 sieve.
    print(sieve(10**6))
| 666 | 1 |
from typing import List
import numpy as np
def __lowerCamelCase ( lowerCamelCase__ ):
    """Return the number of shards implied by ``gen_kwargs`` (at least 1).

    The number of shards is the length of the list-valued entries; all list
    values must have the same length, otherwise sharding is ambiguous and a
    ``RuntimeError`` is raised.  The previous version measured/tested the
    whole kwargs dict instead of each value.
    """
    lists_lengths = {key: len(value) for key, value in lowerCamelCase__.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values(), default=0)
    # A kwargs dict with no list values still counts as a single shard.
    return max(1, max_length)
def __lowerCamelCase ( num_shards , max_num_jobs ):
    """Distribute ``num_shards`` shard indices over at most ``max_num_jobs`` groups.

    Returns a list of ``range`` objects, one per non-empty group, covering
    ``0 .. num_shards - 1`` contiguously, with earlier groups getting the
    extra shards when the division is uneven.  The previous version used the
    same (duplicated) name for both parameters, which is a SyntaxError, and
    started each range at the wrong value.
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        # Evenly split, giving one extra shard to the first (num % jobs) groups.
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def __lowerCamelCase ( gen_kwargs , max_num_jobs ):
    """Split ``gen_kwargs`` into at most ``max_num_jobs`` shard kwargs dicts.

    List values are sliced per shard group; non-list values are copied into
    every group unchanged.  The previous version had duplicated parameter
    names (a SyntaxError) and called sibling helpers under names that do not
    exist in this module, so the helper logic is inlined here to keep the
    function self-contained.
    """
    # -- number of shards: length of the (consistent) list values ----------
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    num_shards = max(1, max(lists_lengths.values(), default=0))
    if num_shards == 1:
        return [dict(gen_kwargs)]
    # -- distribute shard indices over at most max_num_jobs groups ---------
    shard_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shard_indices_per_group[-1].stop if shard_indices_per_group else 0
        shard_indices_per_group.append(range(start, start + num_shards_to_add))
    # -- build one kwargs dict per shard group ------------------------------
    return [
        {
            key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
            if isinstance(value, list)
            else value
            for key, value in gen_kwargs.items()
        }
        for group_idx in range(len(shard_indices_per_group))
    ]
def __lowerCamelCase ( gen_kwargs_list ):
    """Merge a list of shard kwargs dicts back into a single kwargs dict.

    List values are concatenated across shards (in order); non-list values
    are taken from the first shard.  The previous version named its parameter
    differently from the name used in the body (NameError) and tested
    ``isinstance`` against a non-type.
    """
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def __lowerCamelCase ( rng , gen_kwargs ):
    """Return a copy of ``gen_kwargs`` with every list value shuffled by ``rng``.

    Lists of the same length are shuffled with the *same* permutation so that
    parallel data-source lists stay aligned.  The previous version had
    duplicated parameter names (a SyntaxError), leaving ``rng`` and
    ``gen_kwargs`` as undefined names in the body.
    NOTE(review): the (rng, gen_kwargs) parameter order matches the body's
    usage but could not be read off the mangled signature — confirm callers.
    """
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 717 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
    """GPT-2 tokenizer tests (slow and fast implementations).

    NOTE(review): obfuscation damage — all five class attributes share the
    name ``lowercase_`` (only the last survives), results are bound to
    throwaway ``lowercase__`` locals where attribute assignments (e.g.
    ``self.vocab_file``) appear intended, and one method below declares
    ``*SCREAMING_SNAKE_CASE`` and ``**SCREAMING_SNAKE_CASE`` (a duplicate
    parameter name, i.e. a SyntaxError).  Confirm against the upstream
    transformers test suite.
    """

    lowercase_ = GPTaTokenizer
    lowercase_ = GPTaTokenizerFast
    lowercase_ = True
    lowercase_ = {"""add_prefix_space""": True}
    lowercase_ = False

    def snake_case ( self : Any ):
        """Write a tiny vocab/merges pair to the tmp dir for the tests."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase__ : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : List[str] = {"unk_token": "<unk>"}
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(SCREAMING_SNAKE_CASE ) )

    def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
        """Build a slow tokenizer from the tmp dir fixtures."""
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
        """Build a fast (Rust) tokenizer from the tmp dir fixtures."""
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
        """Return a (input, expected output) text pair for round-trip tests."""
        lowercase__ : List[str] = "lower newer"
        lowercase__ : Optional[Any] = "lower newer"
        return input_text, output_text

    def snake_case ( self : Any ):
        """Slow tokenizer: tokenization and token-to-id conversion."""
        lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowercase__ : Dict = "lower newer"
        lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Any = tokens + [tokenizer.unk_token]
        lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[Any] ):
        """Slow and fast tokenizers agree on tokens, ids, and the unknown token."""
        if not self.test_rust_tokenizer:
            return
        lowercase__ : Dict = self.get_tokenizer()
        lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "lower newer"
        # Testing tokenization
        lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing conversion to ids without special tokens
        lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing conversion to ids with special tokens
        lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing the unknown token
        lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
        lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    # NOTE(review): *SCREAMING_SNAKE_CASE and **SCREAMING_SNAKE_CASE duplicate
    # the parameter name — this is a SyntaxError as written.
    def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
        """Encoding with padding="max_length" but no pad token must raise."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
                # Simple input
                lowercase__ : Dict = "This is a simple input"
                lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
                lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
                lowercase__ : Optional[int] = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Simple input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Simple input
                self.assertRaises(
                    SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
                # Pair input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Pair input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Pair input
                self.assertRaises(
                    SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )

    def snake_case ( self : Any ):
        """Slow tokenizer with an explicit pad token: max_length and automatic padding."""
        lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
        # Simple input
        lowercase__ : Optional[int] = "This is a simple input"
        lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
        lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
        lowercase__ : Optional[Any] = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        lowercase__ : Any = tokenizer.pad_token_id
        lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
        self.assertFalse(0 in out_sa["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
        self.assertTrue(0 in out_sa["attention_mask"][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
        self.assertFalse(0 in out_pa["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
        self.assertTrue(0 in out_pa["attention_mask"][1] )

    def snake_case ( self : str ):
        """A custom bos token is prepended to every encoded sequence."""
        lowercase__ : List[str] = "$$$"
        lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = "This is a simple input"
        lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
        lowercase__ : Optional[int] = tokenizer.bos_token_id
        lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
        self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
        lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def snake_case ( self : Optional[int] ):
        """Intentionally empty placeholder."""
        pass

    def snake_case ( self : Tuple ):
        """special_tokens_mask marks exactly the added special tokens."""
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                lowercase__ : str = "Encode this."
                lowercase__ : List[Any] = "This one too please."
                lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                lowercase__ : Dict = tokenizer.encode_plus(
                    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
                lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
                lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
                lowercase__ : List[str] = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
                ]
                lowercase__ : Any = [x for x in filtered_sequence if x is not None]
                self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
    """Integration checks for the facebook/opt-350m tokenizer (slow vs fast).

    NOTE(review): results are bound to throwaway ``lowercase__`` locals and
    keyword values reference the undefined name ``SCREAMING_SNAKE_CASE`` —
    confirm against the upstream transformers test suite.
    """

    def snake_case ( self : Union[str, Any] ):
        """Encoding is stable across save_pretrained / from_pretrained round-trip."""
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = "A photo of a cat"
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("test_opt" )
        lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
        lowercase__ : Dict = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )

    def snake_case ( self : Union[str, Any] ):
        """The slow tokenizer produces the same ids as the fast one above."""
        lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "A photo of a cat"
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        # Same as above
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )

    @unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def snake_case ( self : Tuple ):
        """A replaced bos token survives save/reload with the fast tokenizer."""
        lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = "bos"
        lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
        lowercase__ : Optional[Any] = "A photo of a cat"
        lowercase__ : Union[str, Any] = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        # We changed the bos token
        self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("./tok" )
        lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
        self.assertTrue(tokenizer.is_fast )
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class lowercase_ ( PretrainedConfig ):
    """Configuration for BERT-generation encoder/decoder models.

    The previous (mangled) version subclassed an undefined name, declared
    every ``__init__`` parameter under one duplicated identifier (a
    SyntaxError) and assigned all hyper-parameters to a throwaway local
    instead of instance attributes.  This restores a working configuration
    with the same defaults, using the ``PretrainedConfig`` base imported at
    the top of the file.
    """

    # Identifier used by the Auto* configuration machinery.
    model_type = '''bert-generation'''

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """Store the hyper-parameters; special-token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 7 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
    """Fast (tiny-model) tests for the unconditional LDMPipeline.

    NOTE(review): the properties below bind the constructed model to a
    throwaway ``A_`` local yet ``return model`` / pass ``snake_case`` —
    both undefined names.  Confirm against the upstream diffusers test suite.
    """

    @property
    def SCREAMING_SNAKE_CASE ( self :List[str] ):
        '''Tiny seeded UNet2DModel used as the diffusion backbone.'''
        torch.manual_seed(0 )
        A_ : Optional[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model

    @property
    def SCREAMING_SNAKE_CASE ( self :int ):
        '''Tiny seeded VQModel autoencoder.'''
        torch.manual_seed(0 )
        A_ : Any = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
        return model

    @property
    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''Tiny seeded CLIP text encoder.'''
        torch.manual_seed(0 )
        A_ : int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(snake_case )

    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''End-to-end: two inference steps reproduce a known 64x64 image slice.'''
        A_ : Dict = self.dummy_uncond_unet
        A_ : Any = DDIMScheduler()
        A_ : List[str] = self.dummy_vq_model
        A_ : Union[str, Any] = LDMPipeline(unet=snake_case , vqvae=snake_case , scheduler=snake_case )
        ldm.to(snake_case )
        ldm.set_progress_bar_config(disable=snake_case )
        A_ : Any = torch.manual_seed(0 )
        A_ : Optional[Any] = ldm(generator=snake_case , num_inference_steps=2 , output_type="numpy" ).images
        A_ : Dict = torch.manual_seed(0 )
        A_ : Any = ldm(generator=snake_case , num_inference_steps=2 , output_type="numpy" , return_dict=snake_case )[0]
        A_ : int = image[0, -3:, -3:, -1]
        A_ : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A_ : str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        # Looser tolerance on Apple MPS, where float kernels differ slightly.
        A_ : List[str] = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __magic_name__ ( unittest.TestCase ):
    """Slow integration test for the pretrained CelebA-HQ 256 LDM pipeline.

    NOTE(review): results are bound to throwaway ``A_`` locals and
    ``snake_case`` is referenced without being defined — confirm against the
    upstream diffusers test suite.
    """

    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''Five inference steps reproduce a known 256x256 image slice.'''
        A_ : int = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
        ldm.to(snake_case )
        ldm.set_progress_bar_config(disable=snake_case )
        A_ : int = torch.manual_seed(0 )
        A_ : List[str] = ldm(generator=snake_case , num_inference_steps=5 , output_type="numpy" ).images
        A_ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        A_ : Any = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        # Looser tolerance on Apple MPS, where float kernels differ slightly.
        A_ : List[Any] = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 454 | 0 |
import math


def insertion_sort(array, start=0, end=0):
    """Sort ``array[start:end]`` in place with insertion sort and return the array.

    ``end == 0`` means "up to len(array)".  Used by intro_sort on small slices.
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements one slot right until the insert point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):
    """Sift ``array[index]`` down so the subtree rooted at ``index`` is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Sort the whole array in place with heapsort and return it."""
    n = len(array)
    # Build a max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # Repeatedly move the current maximum to the end and restore the heap.
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split point: elements left of it are <= pivot, right are >= pivot.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort a list in place with introsort and return it.

    Introsort runs quicksort with a recursion-depth budget of 2*log2(n);
    when the budget is exhausted it falls back to heapsort, and slices of
    at most 16 elements are finished with insertion sort.
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))  # quicksort depth budget
    size_threshold = 16  # below this, insertion sort is fastest
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort worker over the half-open slice ``[start, end)``."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Quicksort is degenerating: switch to heapsort.
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Guarded so importing this module does not block on stdin.
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 519 |
from collections.abc import Iterable
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar('''_T''')
class __SCREAMING_SNAKE_CASE ( Generic[_T] ):
    """FIFO queue built from two LIFO stacks (amortized O(1) operations).

    Fixes the mangled original, in which ``__init__`` bound both stacks to a
    throwaway local, the two stacks shared the attribute name ``_stacka``,
    and both mutator methods were named ``__lowerCamelCase`` (so the
    enqueue method was shadowed away).
    """

    def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
        """Initialise the queue, optionally from an iterable of items."""
        # _stacka receives incoming items; _stackb serves outgoing items.
        self._stacka: list[_T] = list(SCREAMING_SNAKE_CASE__ or [] )
        self._stackb: list[_T] = []

    def __len__( self ):
        """Number of items currently queued, across both stacks."""
        return len(self._stacka ) + len(self._stackb )

    def __repr__( self ):
        """Show the queue contents front-to-back."""
        # The outgoing stack pops from its end, so reversing it yields
        # front-of-queue order; the incoming stack is already oldest-first.
        return f'''Queue({tuple(self._stackb[::-1] + self._stacka )})'''

    def put( self , SCREAMING_SNAKE_CASE__ ):
        """Append an item to the back of the queue."""
        self._stacka.append(SCREAMING_SNAKE_CASE__ )

    def get( self ):
        """Remove and return the front item; raise IndexError when empty."""
        stacka_pop = self._stacka.pop
        stackb_append = self._stackb.append
        # Refill the outgoing stack only when it is empty; reversing the
        # incoming stack here is what produces FIFO order.
        if not self._stackb:
            while self._stacka:
                stackb_append(stacka_pop() )
        if not self._stackb:
            raise IndexError("""Queue is empty""" )
        return self._stackb.pop()
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 519 | 1 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class a ( unittest.TestCase ):
    """Unit tests for ``greedy_knapsack.calc_profit``.

    Fixes the mangled original: the three fixture locals shared one name (so
    ``calc_profit`` was called with garbage), the undefined name
    ``_snake_case`` was passed where ``ValueError`` belongs, and all six
    methods shared the name ``UpperCamelCase__`` — none started with
    ``test_``, so unittest discovered nothing.
    """

    def test_sorted(self):
        """A known profit/weight instance yields the known optimal profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """A negative max_weight must raise ValueError."""
        # NOTE(review): assertRaisesRegex without a callable only builds a
        # context manager; kept as-is to preserve the original behaviour.
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        """A negative weight must raise ValueError."""
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        """A negative profit must raise ValueError."""
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        """A zero max_weight must raise ValueError."""
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        """Profit/weight lists of different lengths must raise ValueError."""
        self.assertRaisesRegex(
            ValueError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 4 |
from collections.abc import Sequence
def __lowerCAmelCase ( _UpperCamelCase : Sequence[int] | None = None ) -> int:
'''simple docstring'''
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
SCREAMING_SNAKE_CASE = nums[0]
for i in range(1 , len(_UpperCamelCase ) ):
SCREAMING_SNAKE_CASE = nums[i]
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , ans + num , _UpperCamelCase )
return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user.
    # (Fixes the mangled names: the values were bound to ``a_`` but read
    # back as ``n``/``array``, a NameError.)
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
| 439 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ ( snake_case_ ):
    '''Builds a tiny DeBERTa config plus dummy inputs and runs shape checks
    for each task head; driven by the test class below.

    NOTE(review): this block is machine-mangled and cannot run as written —
    every ``__init__``/method parameter is literally named ``lowerCamelCase__``
    (duplicate argument names are a SyntaxError), every local is rebound to
    ``UpperCamelCase``, and all methods share the name ``UpperCAmelCase`` so
    only the last definition would survive.  The bodies read the intended
    names (``parent``, ``batch_size``, ``config``, ``input_ids``, ...);
    restore distinct identifiers to fix.
    '''
    def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__="None" , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ):
        '''Store the tiny-model hyper-parameters used by the tests.'''
        # NOTE(review): each right-hand side below names the parameter this
        # positional argument was meant to be.
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_input_mask
        UpperCamelCase = use_token_type_ids
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = type_vocab_size
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = num_labels
        UpperCamelCase = num_choices
        UpperCamelCase = relative_attention
        UpperCamelCase = position_biased_input
        UpperCamelCase = pos_att_type
        UpperCamelCase = scope
    def UpperCAmelCase ( self ):
        '''Create random ids/masks/labels plus a config for the tiny model.'''
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase = None
        if self.use_input_mask:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        UpperCamelCase = None
        if self.use_token_type_ids:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase = None
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def UpperCAmelCase ( self ):
        '''Build the tiny DebertaConfig from the stored hyper-parameters.'''
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def UpperCAmelCase ( self ):
        '''Config variant with a larger vocab, presumably for pipeline tests
        — TODO confirm against the upstream tester.'''
        UpperCamelCase = self.get_config()
        UpperCamelCase = 3_0_0
        return config
    def UpperCAmelCase ( self , lowerCamelCase__ ):
        '''Assert the loss is a scalar (empty shape).'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
        '''Run the bare DebertaModel under several input combinations and
        check the sequence-output shape.'''
        UpperCamelCase = DebertaModel(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )[0]
        UpperCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )[0]
        UpperCamelCase = model(lowerCamelCase__ )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
        '''Check the masked-LM head logits shape.'''
        UpperCamelCase = DebertaForMaskedLM(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
        '''Check the sequence-classification head logits shape and loss.'''
        UpperCamelCase = self.num_labels
        UpperCamelCase = DebertaForSequenceClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(lowerCamelCase__ )
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
        '''Check the token-classification head logits shape.'''
        UpperCamelCase = self.num_labels
        UpperCamelCase = DebertaForTokenClassification(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
        '''Check the question-answering head start/end logits shapes.'''
        UpperCamelCase = DebertaForQuestionAnswering(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        UpperCamelCase = model(
            lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def UpperCAmelCase ( self ):
        '''Repackage the prepared inputs into the common kwargs dict.'''
        UpperCamelCase = self.prepare_config_and_inputs()
        # NOTE(review): the seven unpack targets below all share one name, so
        # only the last element would survive, and ``config_and_inputs`` is
        # undefined here (the tuple above was bound to ``UpperCamelCase``).
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) = config_and_inputs
        UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowercase__ ( snake_case_, snake_case_, unittest.TestCase ):
    '''Standard-suite model tests for DeBERTa.

    NOTE(review): mangled — the five class attributes below all share the
    name ``_snake_case`` (only the final ``False`` survives; they were meant
    to be ``all_model_classes``, ``pipeline_model_mapping`` and three
    boolean flags), all test methods share the name ``UpperCAmelCase``, and
    ``setUp`` binds its testers to a local instead of ``self``.
    '''
    _snake_case = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    _snake_case = (
        {
            '''feature-extraction''': DebertaModel,
            '''fill-mask''': DebertaForMaskedLM,
            '''question-answering''': DebertaForQuestionAnswering,
            '''text-classification''': DebertaForSequenceClassification,
            '''token-classification''': DebertaForTokenClassification,
            '''zero-shot''': DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _snake_case = True
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False
    def UpperCAmelCase ( self ):
        '''Set up the model tester and config tester for each test.'''
        # NOTE(review): ``DebertaModelTester`` is not defined in this file
        # (the tester class above is named ``lowercase__``), and
        # ``config_class=lowerCamelCase__`` references an undefined name —
        # presumably ``DebertaConfig``.
        UpperCamelCase = DebertaModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=3_7 )
    def UpperCAmelCase ( self ):
        '''Run the shared configuration checks.'''
        self.config_tester.run_common_tests()
    def UpperCAmelCase ( self ):
        '''Exercise the bare model shape checks.'''
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*lowerCamelCase__ )
    def UpperCAmelCase ( self ):
        '''Exercise the sequence-classification head checks.'''
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase__ )
    def UpperCAmelCase ( self ):
        '''Exercise the masked-LM head checks.'''
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase__ )
    def UpperCAmelCase ( self ):
        '''Exercise the question-answering head checks.'''
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase__ )
    def UpperCAmelCase ( self ):
        '''Exercise the token-classification head checks.'''
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase__ )
    @slow
    def UpperCAmelCase ( self ):
        '''Smoke-test loading the first pretrained checkpoint.'''
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = DebertaModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
    """Integration tests that run the real ``microsoft/deberta-base`` weights.

    Fixes the mangled original: both methods shared the name
    ``UpperCAmelCase`` (the skipped one was shadowed away) and every local
    was rebound to ``UpperCamelCase`` while the calls read other names.
    """

    @unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm( self ):
        pass

    @slow
    def test_inference_no_head( self ):
        """Compare a slice of the base model's output to reference values."""
        model = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
        input_ids = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
| 705 |
'''Lazy-import shim for the ELECTRA model family.

Heavy submodules (tokenizers, PyTorch/TF/Flax models) are meant to be
imported on first attribute access via ``_LazyModule``; under
``TYPE_CHECKING`` everything is imported eagerly for static analyzers.

NOTE(review): mangled — the import-structure dict and every per-backend
name list were all renamed ``snake_case_`` (each assignment overwrites the
previous one), and the final ``_LazyModule`` call references
``_import_structure``, which is never defined here.  Restore
``_import_structure = {...}`` plus ``_import_structure["..."] = [...]``
assignments to fix.
'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)
# Base structure: config and slow tokenizer are always available.
snake_case_ : Any = {
    'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
    'tokenization_electra': ['ElectraTokenizer'],
}
# Each optional backend contributes its names only when importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : Union[str, Any] = ['ElectraTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : Tuple = [
        'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ElectraForCausalLM',
        'ElectraForMaskedLM',
        'ElectraForMultipleChoice',
        'ElectraForPreTraining',
        'ElectraForQuestionAnswering',
        'ElectraForSequenceClassification',
        'ElectraForTokenClassification',
        'ElectraModel',
        'ElectraPreTrainedModel',
        'load_tf_weights_in_electra',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : int = [
        'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFElectraForMaskedLM',
        'TFElectraForMultipleChoice',
        'TFElectraForPreTraining',
        'TFElectraForQuestionAnswering',
        'TFElectraForSequenceClassification',
        'TFElectraForTokenClassification',
        'TFElectraModel',
        'TFElectraPreTrainedModel',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : Optional[int] = [
        'FlaxElectraForCausalLM',
        'FlaxElectraForMaskedLM',
        'FlaxElectraForMultipleChoice',
        'FlaxElectraForPreTraining',
        'FlaxElectraForQuestionAnswering',
        'FlaxElectraForSequenceClassification',
        'FlaxElectraForTokenClassification',
        'FlaxElectraModel',
        'FlaxElectraPreTrainedModel',
    ]
# Under static analysis, perform the real imports so tools see the symbols.
if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
# At runtime, replace this module with the lazy proxy.
else:
    import sys
    snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350 | 0 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
    """Holds one conversation: its UUID, the processed user inputs, the
    generated responses, and the not-yet-processed user input.

    Fixes the mangled original: the four ``__init__`` parameters shared one
    name (a SyntaxError), all state was bound to locals instead of ``self``,
    ``uuid.uuida`` is not a ``uuid`` attribute (``uuid.uuid4``), and the
    method names were collapsed even though ``__repr__`` and the pipeline
    below call ``iter_texts``/``mark_processed``/``append_response``.
    """

    def __init__( self , text = None , conversation_id = None , past_user_inputs = None , generated_responses = None ):
        """Create a conversation, generating a fresh UUID if none is given."""
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__( self , other ):
        """Conversations match on UUID, or failing that on full content."""
        if not isinstance(other , __SCREAMING_SNAKE_CASE ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input( self , text , overwrite = False ):
        """Stage ``text`` as the next user input; warn if one is pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    F'''with: "{text}".''' )
                self.new_user_input = text
            else:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
        else:
            self.new_user_input = text

    def mark_processed( self ):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None

    def append_response( self , response ):
        """Record a model response."""
        self.generated_responses.append(response )

    def iter_texts( self ):
        """Yield ``(is_user, text)`` pairs in conversational order."""
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__( self ):
        """Readable transcript of the conversation."""
        output = F'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
            output += F'''{name} >> {text} \n'''
        return output
@add_end_docstrings(
    lowercase__ , R'\n    min_length_for_response (`int`, *optional*, defaults to 32):\n        The minimum length (in number of tokens) for a response.\n    minimum_tokens (`int`, *optional*, defaults to 10):\n        The minimum length of tokens to leave for a response.\n    ' , )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
    '''Multi-turn conversational pipeline built on a generation model.

    NOTE(review): mangled throughout — ``*UpperCAmelCase__ , **UpperCAmelCase__``
    duplicates an argument name (SyntaxError), every local is rebound to
    ``lowercase`` while later lines read the intended names, the four
    framework hooks all share the name ``lowerCamelCase_`` (the Pipeline
    base expects ``_sanitize_parameters``/``preprocess``/``_forward``/
    ``postprocess``), and the decorator's first argument ``lowercase__`` is
    presumably ``PIPELINE_INIT_ARGS``.  Restore distinct names to fix.
    '''
    def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : str ):
        '''Initialise the base pipeline and ensure a pad token exists.'''
        super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
        # NOTE(review): the result is bound to a local; the intent is
        # ``self.tokenizer.pad_token = self.tokenizer.eos_token``.
        if self.tokenizer.pad_token_id is None:
            lowercase : Tuple =self.tokenizer.eos_token
    def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : Union[str, Any] ):
        '''Split user kwargs into preprocess/forward/postprocess params.'''
        lowercase : Tuple ={}
        lowercase : Tuple ={}
        lowercase : Dict ={}
        if min_length_for_response is not None:
            lowercase : List[str] =min_length_for_response
        if minimum_tokens is not None:
            lowercase : Optional[Any] =minimum_tokens
        if "max_length" in generate_kwargs:
            lowercase : List[str] =generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            lowercase : Optional[Any] =clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(UpperCAmelCase__ )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self : str , UpperCAmelCase__ : Union[Conversation, List[Conversation]] , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Any ):
        '''Run the pipeline; unwrap a single-conversation result.'''
        lowercase : List[Any] =super().__call__(UpperCAmelCase__ , num_workers=UpperCAmelCase__ , **UpperCAmelCase__ )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) == 1:
            return outputs[0]
        return outputs
    def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Conversation , UpperCAmelCase__ : Any=32 ):
        '''Encode a Conversation into model input ids.'''
        if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
        if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
            lowercase : Optional[int] =self.tokenizer._build_conversation_input_ids(UpperCAmelCase__ )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            lowercase : Any =self._legacy_parse_and_tokenize(UpperCAmelCase__ )
        if self.framework == "pt":
            lowercase : int =torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            lowercase : int =tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=10 , **UpperCAmelCase__ : Tuple ):
        '''Generate a response, trimming the prompt to leave room for it.'''
        lowercase : Union[str, Any] =generate_kwargs.get('''max_length''' , self.model.config.max_length )
        lowercase : List[str] =model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
            lowercase : Dict =max_length - minimum_tokens
            lowercase : Any =model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                lowercase : Tuple =model_inputs['''attention_mask'''][:, -trim:]
        lowercase : int =model_inputs.pop('''conversation''' )
        lowercase : Any =max_length
        lowercase : Any =self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ )
        if self.model.config.is_encoder_decoder:
            lowercase : int =1
        else:
            lowercase : List[str] =n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]=True ):
        '''Decode the generated ids and append them to the conversation.'''
        lowercase : Optional[int] =model_outputs['''output_ids''']
        lowercase : int =self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , )
        lowercase : Optional[Any] =model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(UpperCAmelCase__ )
        return conversation
    def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Conversation ):
        '''Fallback encoding: join turns with the EOS token and truncate.'''
        lowercase : Optional[int] =self.tokenizer.eos_token_id
        lowercase : List[Any] =[]
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) )
        if len(UpperCAmelCase__ ) > self.tokenizer.model_max_length:
            lowercase : Optional[Any] =input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 92 | '''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
UpperCamelCase_ = datasets.logging.get_logger(__name__)
UpperCamelCase_ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCamelCase_ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
UpperCamelCase_ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE( datasets.Metric ):
    """COMET machine-translation evaluation metric wrapper for ``datasets``.

    Fixes the mangled original: the three framework hooks all shared the
    name ``__lowerCamelCase`` (``datasets.Metric`` dispatches to ``_info``,
    ``_download_and_prepare`` and ``_compute``), the loaded scorer was bound
    to a throwaway local instead of ``self.scorer``, ``_compute`` had
    duplicate parameter names (a SyntaxError), and ``dict(zip(...))`` was
    called with the wrong arguments.
    """

    def _info(self):
        """Describe the metric's inputs, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'sources': datasets.Value('string' , id='sequence' ),
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
                'https://github.com/Unbabel/COMET',
                'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
                'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
            ] , )

    def _download_and_prepare(self, dl_manager):
        """Load the COMET checkpoint selected by ``self.config_name``."""
        if self.config_name == "default":
            # "wmt20-comet-da" is the model used for the WMT20 shared task.
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score each (source, hypothesis, reference) triple with COMET."""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        # Reshape the column dict into one row dict per example.
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores, mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 209 | 0 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check Project Euler 43's substring-divisibility property for a
    pandigital ``num`` given as a tuple of 10 digits: d2d3d4 % 2,
    d3d4d5 % 3, d4d5d6 % 5, and the remaining 3-digit windows % 7, 11,
    13, 17 must all be zero.

    (Fixes the mangled loop, which enumerated the input tuple instead of
    the prime-divisor list — the list was assigned but never used.)
    """
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    # Each prime checks the 3-digit window starting one position further on.
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """Sum all 0..n-1 pandigital numbers that satisfy the Euler 43
    substring-divisibility property (default: all 0-9 pandigitals).

    (Fixes the mangled body: ``map`` was called with the parameter in both
    argument slots instead of ``map(str, num)``, and the permutation/check
    calls referenced the wrong names.)
    """
    return sum(
        int(''''''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
    # Print the Project Euler problem 43 answer.
    print(f'{solution() = }')
| 357 |
'''Lazy-import shim for the BARTpho tokenizer (sentencepiece-gated).

NOTE(review): mangled — the import-structure dict and the name list were
both renamed ``__lowercase`` (the list overwrites the dict), while the
final ``_LazyModule`` call references ``_import_structure``, which is never
defined here.  Restore ``_import_structure = {}`` and
``_import_structure["tokenization_bartpho"] = [...]`` to fix.
'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowercase : int = {}
# The tokenizer is only exposed when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : Any = ['''BartphoTokenizer''']
# Under static analysis, import eagerly; at runtime, install the lazy proxy.
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys
    __lowercase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 357 | 1 |
"""Lazy-import shim for the Funnel Transformer model family.

NOTE(review): mangled — the import-structure dict and every per-backend
name list were all renamed ``A_`` (each assignment overwrites the previous
one), while the final ``_LazyModule`` call references
``_import_structure``, which is never defined here.  Restore
``_import_structure = {...}`` plus ``_import_structure["..."] = [...]``
assignments to fix.
"""
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)
# Base structure: config, conversion script and slow tokenizer.
A_ = {
    """configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
    """convert_funnel_original_tf_checkpoint_to_pytorch""": [],
    """tokenization_funnel""": ["""FunnelTokenizer"""],
}
# Each optional backend contributes its names only when importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ = ["""FunnelTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ = [
        """FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FunnelBaseModel""",
        """FunnelForMaskedLM""",
        """FunnelForMultipleChoice""",
        """FunnelForPreTraining""",
        """FunnelForQuestionAnswering""",
        """FunnelForSequenceClassification""",
        """FunnelForTokenClassification""",
        """FunnelModel""",
        """FunnelPreTrainedModel""",
        """load_tf_weights_in_funnel""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ = [
        """TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFFunnelBaseModel""",
        """TFFunnelForMaskedLM""",
        """TFFunnelForMultipleChoice""",
        """TFFunnelForPreTraining""",
        """TFFunnelForQuestionAnswering""",
        """TFFunnelForSequenceClassification""",
        """TFFunnelForTokenClassification""",
        """TFFunnelModel""",
        """TFFunnelPreTrainedModel""",
    ]
# Under static analysis, import eagerly; at runtime, install the lazy proxy.
if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys
    A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class lowerCamelCase__ ( UpperCAmelCase ):
    """Fast tokenizer stub that registers CustomTokenizer as its slow counterpart.

    NOTE(review): the base name `UpperCAmelCase` is not defined by this file's
    visible imports — presumably it should be `BertTokenizerFast` (imported
    above); confirm against the upstream fixture.  The redundant trailing
    `pass` after the class attribute has been removed.
    """

    # Attribute pointing at the slow tokenizer class this fast one pairs with.
    _UpperCamelCase : Tuple = CustomTokenizer
| 551 | 0 |
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random bases.

    Always returns True for primes; returns False for composites with
    probability >= 1 - 4**-5 per call.  `num` is assumed odd and > 3
    (callers pre-filter with trial division).
    """
    # Write num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            # Square repeatedly; a witness never reaches num - 1.
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Return True if num is (almost certainly) prime.

    Fast path: membership / trial division against all primes below 1000;
    anything that survives is handed to the probabilistic `rabin_miller` test.
    """
    if num < 2:
        return False
    # All 168 primes below 1000.
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    # Survivors are odd, > 1000, and coprime to all small primes.
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime with `keysize` bits (rejection sampling)."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
a :Tuple = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 713 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Deterministic primality check via 6k±1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7).

    The first loop counts 2 and 3; the second loop then only tests odd
    candidates from 5 upward.
    """
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'{solution() = }')
| 12 | 0 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_a : Any = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class UpperCamelCase_:
    """Dataset version, e.g. "1.2.3".

    Holds the raw version string plus the parsed (major, minor, patch)
    components.  NOTE(review): the garbled source named every field `A` and
    every method `lowerCamelCase_`; names below are reconstructed from the
    attributes the bodies actually reference (`version_str`, `self.tuple`,
    `self.major`, ...).
    """

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        # Parse "x.y.z" into the three integer components.
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''

    @property
    def tuple(self):
        """The (major, minor, patch) tuple used for comparisons."""
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        """Coerce `other` (str or version) into a comparable version object."""
        if isinstance(other, str):
            return UpperCamelCase_(other)
        elif isinstance(other, UpperCamelCase_):
            return other
        raise TypeError(f'''{other} (type {type(other)}) cannot be compared to version.''')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        """Build a version from a dict, ignoring unknown keys."""
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        return self.version_str
def _str_to_version_tuple(version_str):
    """Parse "x.y.z" into an (int, int, int) tuple or raise ValueError."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''')
    # The garbled source converted the wrong variable here (`int(_A)`).
    return tuple(int(v) for v in [res.group("""major"""), res.group("""minor"""), res.group("""patch""")])
def UpperCamelCase__ ( _A: Dict ):
'''simple docstring'''
return ".".join(str(_A ) for v in version_tuple )
| 479 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCamelCase_(ModelMixin, ConfigMixin):
    """Normalizes embeddings with learned per-dimension mean and std.

    NOTE(review): bases reconstructed as ModelMixin/ConfigMixin — they are the
    only classes imported above and `@register_to_config` requires ConfigMixin.
    The garbled source also assigned every result to a throwaway local, making
    `to`/`scale`/`unscale` no-ops; the intended targets are restored.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        # Learned statistics, shape (1, embedding_dim).
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        """Move/cast the statistics; returns self for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Standardize embeddings: (x - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert `scale`: x * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 479 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Module-level globals; the functions below reference `logger` and `device`,
# which the garbled source collapsed into a single overwritten name.
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)  # embedding extraction only — no training
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> list:
    """Split `text` into chunks of `n` `character`-separated pieces each."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split each document of a {"title", "text"} batch into ~100-word passages."""
    titles, texts = [], []
    for title, text in zip(documents['''title'''], documents['''text''']):
        if text is not None:
            for passage in split_text(text):
                # Passages inherit their document title (empty if missing).
                titles.append(title if title is not None else '''''')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder, ctx_tokenizer) -> dict:
    """Compute DPR embeddings for a batch of (title, text) passages."""
    input_ids = ctx_tokenizer(
        documents['''title'''], documents['''text'''], truncation=True, padding='''longest''', return_tensors='''pt'''
    )['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
    """Build a DPR-embedded passage dataset from a csv and index it with Faiss HNSW."""
    ######################################
    logger.info('''Step 1 - Create the dataset''')
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        '''csv''', data_files=[rag_example_args.csv_path], split='''train''', delimiter='''\t''', column_names=['''title''', '''text''']
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset''')
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info('''Step 2 - Index the dataset''')
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('''embeddings''', custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset_hnsw_index.faiss''')
    dataset.get_index('''embeddings''').save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    """CLI arguments describing the input csv, models, and output location.

    Name reconstructed from the `HfArgumentParser((RagExampleArguments, ...))`
    call below; field names from the `rag_example_args.*` accesses in `main`.
    """

    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    """CLI arguments controlling dataset preprocessing parallelism and batching."""

    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    """CLI arguments for the Faiss HNSW index (embedding dim and link count)."""

    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowercase__ =HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowercase__ , lowercase__ , lowercase__ =parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowercase__ =rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 326 |
# Adjacency list of the DAG and the list of all vertices.  The garbled source
# assigned both to the same overwritten name while the function reads the
# globals `edges` and `vertices`.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """DFS-based topological sort of the module-level graph.

    Appends each vertex to `sort` after all of its descendants; restarts the
    DFS from any vertex still unvisited so disconnected parts are covered.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 326 | 1 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a list of 0/1/2 values in place using Dijkstra's three-way partition.

    Raises ValueError if the sequence contains anything other than the three
    flag colors.  The garbled source lost both swaps to throwaway variables;
    the in-place swaps are restored.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0 goes to the front; advance both pointers.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2 goes to the back; re-examine the swapped-in element.
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = input('''Enter numbers separated by commas:\n''').strip()
lowerCamelCase_ = [int(item.strip()) for item in user_input.split(''',''')]
print(f'{dutch_national_flag_sort(unsorted)}')
| 513 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Invert an image in place: each pixel becomes 255 - value per channel.

    Expects a (height, width, channels) uint8-like array; returns the same
    array.  The garbled source assigned the negated pixel to a throwaway
    variable instead of writing it back.
    """
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
lowerCamelCase_ = imread('''image_data/lena.jpg''', 1)
# convert to its negative
lowerCamelCase_ = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 513 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCamelCase(PipelineTool):
    """Tool producing a binary segmentation mask of an image for a text label via CLIPSeg.

    NOTE(review): the base class and the attribute/method names below are
    reconstructed — the garbled source used one shared name for the base,
    every class attribute, and every method; `PipelineTool` is the only tool
    base imported above and the attribute layout follows its contract.
    """

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # Pre/post processing relies on PIL, so vision deps must be present.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Tokenize the (image, label) pair for the CLIPSeg model."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run CLIPSeg without gradients and return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits at 0 and return a black/white PIL mask."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 157 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_lowerCamelCase : List[Any] = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_lowerCamelCase : List[str] = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_lowerCamelCase : Optional[int] = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase(datasets.Metric):
    """BLEU metric backed by the tensorflow/nmt reference implementation."""

    def _info(self):
        # datasets.Metric subclasses must implement `_info`; the garbled
        # source named both methods `A_`, which the framework never calls.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Score `predictions` against `references` with compute_bleu."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 157 | 1 |
'''simple docstring'''
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n).

    Uses the factorial number system: the precomputed factorials act as place
    values, and repeated divmod picks the next remaining element.
    The garbled source declared two parameters with the same name (a syntax
    error); they are reconstructed from the body's uses of `k` and `n`.
    """
    # Factorails (k) = 1, 1, 2, 6, ..., (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`.

    Returns the space-separated output string (with a trailing space).
    Raises ValueError on non-integer or non-positive inputs.
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be
 and integer and be more than 0'''
        )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 254 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * |v|^2 of a body.

    Raises ValueError for negative mass; velocity sign is irrelevant.
    """
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''')
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n-by-n number spiral (PE 28).

    Each ring i contributes four corners: 4*(2i+1)^2 - 6*(2i).  The garbled
    source collapsed `total`, `odd`, and `even` into one overwritten name.
    """
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__a = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number') | 300 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 688 |
from __future__ import annotations
def median_of_two_arrays(nums_a: list[float], nums_b: list[float]) -> float:
    """Return the median of the merged contents of two lists.

    The garbled source declared both parameters with the same name (a syntax
    error); distinct names are restored.
    """
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ = [float(x) for x in input('Enter the elements of first array: ').split()]
snake_case_ = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
| 592 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowercase__ ( lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def is_chinese(word: str) -> int:
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """Collect the distinct multi-character Chinese words from `tokens`."""
    word_set = set()

    for token in tokens:
        # Only whole words (length > 1) made entirely of CJK chars qualify.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix "##" onto BERT sub-tokens that continue a known Chinese word.

    Greedily matches the longest word from `chinese_word_set` at each
    position; every token after the first inside a match gets "##".
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            length = min(end - start, max_word_len)
            # Try the longest candidate first.
            for i in range(length, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """Compute whole-word-mask reference positions for each input line.

    Returns, per line, the indices of BERT sub-tokens that are the
    continuation ("##"-prefixed) of a Chinese word found by LTP segmentation.
    """
    # LTP word segmentation, in batches of 100 lines.
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    # BERT tokenization, same batching.
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read `args.file_name`, compute whole-word-masking refs, write them as JSON lines to `args.save_path`."""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # Drop empty/whitespace-only lines to avoid delimiter like '\u2029'.
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
| 183 |
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """
    Calculate the logical AND of two binary inputs.

    Returns 1 only when neither input is 0.

    >>> and_gate(0, 0)
    0
    >>> and_gate(0, 1)
    0
    >>> and_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    """Exhaustively check the AND-gate truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
    # Run the truth-table self-check before printing each combination.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 183 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Prefix substitutions applied (in order) to every original checkpoint key.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# The only checkpoint file names this conversion script knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a checkpoint file and return its state dict, mapped onto CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Translate an original VisualBERT state dict into the transformers key layout.

    Args:
        d: original state dict.
        config: `VisualBertConfig`; `max_position_embeddings` sizes the position ids buffer.
        rename_keys_prefix: (old, new) prefix substitution pairs applied to every key.

    Returns:
        An `OrderedDict` with renamed keys; detector weights are dropped.
    """
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak an original VisualBERT checkpoint into a transformers model
    and save it under `pytorch_dump_folder_path`.

    The target task (pretraining / vqa / nlvr / multichoice) and the visual
    embedding size are inferred from the checkpoint file name.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 83 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 151 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class lowerCAmelCase__(PreTrainedTokenizerFast):
    """
    Fast NLLB tokenizer (backed by HuggingFace's *tokenizers* library).

    Source-language and target-language codes are injected around the encoded
    sequence: prefix ``[lang_code]`` / suffix ``[eos]`` by default, or prefix
    ``[]`` / suffix ``[eos, lang_code]`` when ``legacy_behaviour`` is set.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Currently active source-language code."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap token ids with the language prefix/suffix special tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """NLLB does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Encode `raw_inputs` for translation and attach the forced BOS id of the target language."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset prefix/suffix tokens and the post-processor for the source language."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset prefix/suffix tokens and the post-processor for the target language."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the slow tokenizer's sentencepiece model into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """
    Configuration for BlenderbotSmall models.

    Defaults mirror the facebook/blenderbot_small-90M checkpoint.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export config for BlenderbotSmall (default, seq2seq-lm and causal-lm tasks)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs, per task and `use_past`."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a past cache, the decoder only sees one new token per step.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model outputs, adding `present.*` for cached decoding."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build encoder+decoder dummy inputs, including zeroed past_key_values when enabled."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs, extending the attention mask over the fake past."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a dummy batch of unk-token strings of the effective batch/sequence size."""
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation based on the export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten past_key_values using the seq2seq layout for seq2seq tasks, the plain layout otherwise."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(__snake_case):
    """Fast tokenizer for the DPR context encoder (BERT-based).

    NOTE(review): previously all five class attributes were assigned to the same
    name, so only the last assignment survived; the conventional transformers
    tokenizer attribute names are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(__snake_case):
    """Fast tokenizer for the DPR question encoder (BERT-based).

    NOTE(review): attribute names restored — the obfuscated version bound all
    five values to one repeated class attribute, keeping only the last.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# Result records for the DPR reader. Bound under their real names: the span
# decoding code below constructs `DPRSpanPrediction` instances by name.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCamelCase__ : Union[str, Any] = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case )
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR-reader-specific encoding and answer-span decoding on top
    of a (fast) BERT tokenizer.

    NOTE(review): the obfuscated version destroyed all parameter/local names
    (bodies referenced `titles`, `texts`, `max_length`, … which were never
    bound) and gave all three methods the same name; the conventional names are
    restored here. The sort lambda in `_get_best_spans` also had a parameter
    name that did not match its body (a guaranteed NameError) — fixed.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Encode question/title/text triples as `[CLS] question [SEP] title [SEP] text`.

        Returns a dict with `input_ids` of shape (n_passages, seq_len) and, unless
        disabled, an `attention_mask`.
        """
        if titles is None and texts is None:
            # No passages given: behave like a plain tokenizer call on the questions.
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as the second sequence of a pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is duplicated for every passage.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ):
        """Extract the best answer spans from reader logits, best passages first.

        Returns up to `num_spans` `DPRSpanPrediction` records, taking at most
        `num_spans_per_passage` spans from each passage.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Consider passages from most to least relevant.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """Greedily pick the `top_spans` highest-scoring non-overlapping spans.

        A span's score is start_logit + end_logit; spans longer than
        `max_answer_length` are never generated.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip any span overlapping an already-chosen one.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__snake_case )
class DPRReaderTokenizerFast(__snake_case, __snake_case):
    """Fast tokenizer for the DPR reader: span-decoding mixin + BERT fast tokenizer.

    NOTE(review): attribute names restored — the obfuscated version assigned all
    six values to a single repeated class attribute, keeping only the last.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 387 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    """Fast CPU sanity checks for KarrasVePipeline with a tiny UNet.

    NOTE(review): local names restored — the obfuscated version rebound every
    local to `_A` while later lines referenced `image`, `image_slice`,
    `expected_slice` etc., which were never defined.
    """

    @property
    def dummy_uncond_unet(self):
        """Tiny deterministic unconditional UNet2DModel for fast tests."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        """Pipeline output must match the tuple (return_dict=False) output and a known slice."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test running KarrasVePipeline with a real pretrained UNet."""

    def test_inference(self):
        """Generated 256x256 sample must match a known pixel slice."""
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 27 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    """Fast tests for StableDiffusionPipelineSafe with tiny dummy components.

    NOTE(review): the obfuscated version gave every method the same name (so the
    later definitions clobbered the dummy-component properties) and rebound all
    locals to `UpperCAmelCase_` while later lines referenced `sd_pipe`, `image`,
    `prompt`, …; the original names are restored.
    """

    def tearDown(self):
        # Free CPU/GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        """Deterministic random image tensor of shape (1, 3, 32, 32)."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        """Tiny conditional UNet used by the fast tests."""
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        """Tiny VAE used by the fast tests."""
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """Tiny CLIP text encoder used by the fast tests."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        """Stub feature extractor returning an object with empty pixel_values."""

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        """End-to-end run with a DDIM scheduler; checks dict vs tuple outputs."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_safe_diffusion_pndm(self):
        """End-to-end run with a PNDM scheduler; checks dict vs tuple outputs."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        """Pipelines created with safety_checker=None must run and round-trip save/load."""
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """fp16 components must produce an image of the expected shape on GPU."""
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineNightlyTests(unittest.TestCase):
    """Nightly GPU tests comparing outputs with and without safe-latent-diffusion guidance."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        """Harmful prompt: output changes once SLD guidance is enabled."""
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4_003_660_346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        """Nudity-adjacent prompt: output changes once SLD guidance is enabled."""
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2_734_971_755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        """With the safety checker active, unsafe output is blacked out unless SLD rescues it."""
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1_044_355_234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        # Safety checker blacks out the image -> all zeros.
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download and return the demo merlion image (RGB PIL.Image) used to sanity-check the converted model."""
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build (old_name, new_name) pairs mapping LAVIS BLIP-2 weight names to HF names.

    Args:
        config: model config; only ``config.vision_config.num_hidden_layers`` is read.

    Returns:
        List of (source_key, destination_key) tuples.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under ``old`` to ``new`` in ``dct`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Merge the separate q/v attention biases into a single qkv bias per layer (in place).

    The original checkpoint stores no k bias, so a zero tensor is inserted between q and v.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict: [q | zeros (k) | v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    """Build the HF BLIP-2 config for a given original checkpoint name.

    Args:
        model_name: original checkpoint name (e.g. "blip2-opt-2.7b-coco").
        eos_token_id: EOS id forced onto the OPT text config (important for generation).

    Returns:
        Tuple of (BlipaConfig, image_size).

    Raises:
        ValueError: if the model name matches no known text backbone.
    """
    # COCO-finetuned checkpoints use 364x364 inputs, the rest 224x224.
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    else:
        # Previously this fell through and raised an opaque NameError on text_config.
        raise ValueError(f"Model name {model_name} not supported")
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 641 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCAmelCase = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __UpperCamelCase ( lowercase__ : str ):
'''simple docstring'''
if "://" in dataset_path:
__lowercase =dataset_path.split('://' )[1]
return dataset_path
def __UpperCamelCase ( lowercase__ : fsspec.AbstractFileSystem ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __UpperCamelCase ( lowercase__ : fsspec.AbstractFileSystem, lowercase__ : str, lowercase__ : str ):
'''simple docstring'''
__lowercase =not is_remote_filesystem(lowercase__ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase__ ), fs._strip_protocol(lowercase__ ) )
else:
fs.mv(lowercase__, lowercase__, recursive=lowercase__ )
def __UpperCamelCase ( ):
'''simple docstring'''
if hasattr(fsspec.asyn, 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__lowercase =None
__lowercase =None
__lowercase =threading.Lock()
| 119 |
'''simple docstring'''
import string
def __UpperCamelCase ( lowercase__ : str ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
__lowercase =''
for symbol in message:
if symbol in string.ascii_uppercase:
__lowercase =string.ascii_uppercase.find(lowercase__ )
__lowercase =num - key
if num < 0:
__lowercase =num + len(string.ascii_uppercase )
__lowercase =translated + string.ascii_uppercase[num]
else:
__lowercase =translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def __UpperCamelCase ( ):
'''simple docstring'''
__lowercase =input('Encrypted message: ' )
__lowercase =message.upper()
decrypt(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 119 | 1 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =max(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =min(SCREAMING_SNAKE_CASE__ )
# create the counting array
__UpperCamelCase =coll_max + 1 - coll_min
__UpperCamelCase =[0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCamelCase =[0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
return "".join([chr(SCREAMING_SNAKE_CASE__ ) for i in counting_sort([ord(SCREAMING_SNAKE_CASE__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
_A = input('Enter numbers separated by a comma:\n').strip()
_A = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 682 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
# Equation
__UpperCamelCase =sin((phi_a - phi_a) / 2 )
__UpperCamelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCamelCase =sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE__ ) * cos(SCREAMING_SNAKE_CASE__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 1 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
UpperCamelCase__ : Optional[int] = int(input('''Enter number: ''').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 105 |
def lowerCAmelCase_ ( ):
return [
a * b * (1_0_0_0 - a - b)
for a in range(1 , 9_9_9 )
for b in range(__lowerCamelCase , 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 81 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
@staticmethod
def _lowercase ( *__A : Tuple , **__A : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self : Optional[int] , __A : Tuple , __A : int , __A : Optional[Any] ):
snake_case__ : Any = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
snake_case__ : Optional[Any] = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def _lowercase ( self : int , __A : List[Any] , __A : int ):
snake_case__ : Union[str, Any] = vqa_pipeline(UpperCAmelCase_ , top_k=1 )
self.assertEqual(
UpperCAmelCase_ , [
[{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}],
[{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}],
] , )
@require_torch
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Any = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
snake_case__ : List[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
snake_case__ : Union[str, Any] = "How many cats are there?"
snake_case__ : str = vqa_pipeline(image=UpperCAmelCase_ , question="How many cats are there?" , top_k=2 )
self.assertEqual(
UpperCAmelCase_ , [{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}, {"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}] )
snake_case__ : List[str] = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
UpperCAmelCase_ , [{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}, {"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}] )
@slow
@require_torch
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
snake_case__ : Optional[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
snake_case__ : int = "How many cats are there?"
snake_case__ : List[Any] = vqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] )
snake_case__ : Any = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] )
snake_case__ : str = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [[{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def _lowercase ( self : List[str] ):
pass
| 702 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ : Optional[int] = bs[:]
snake_case__ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
snake_case__ : Dict = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__ : Dict = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : List[Any] = char
return pairs
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__, snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 25 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __magic_name__ ( ) -> Dict:
a__ = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
a__ = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(UpperCamelCase )
# Let's go
a__ = parser.parse_args()
if not hasattr(UpperCamelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
a__ = args.func(UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
| 273 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowercase(unittest.TestCase ):
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
a__ = 'laion/clap-htsat-unfused'
a__ = tempfile.mkdtemp()
def lowercase__ ( self , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE )
def lowercase__ ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = self.get_tokenizer()
a__ = self.get_feature_extractor()
a__ = ClapProcessor(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
a__ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
a__ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
a__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
a__ = self.get_feature_extractor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
a__ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = self.get_feature_extractor()
a__ = self.get_tokenizer()
a__ = ClapProcessor(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
a__ = floats_list((3, 1_0_0_0) )
a__ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' )
a__ = processor(audios=__SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
a__ = self.get_feature_extractor()
a__ = self.get_tokenizer()
a__ = ClapProcessor(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
a__ = 'This is a test string'
a__ = processor(text=__SCREAMING_SNAKE_CASE )
a__ = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
a__ = self.get_feature_extractor()
a__ = self.get_tokenizer()
a__ = ClapProcessor(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
a__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ = processor.batch_decode(__SCREAMING_SNAKE_CASE )
a__ = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
a__ = self.get_feature_extractor()
a__ = self.get_tokenizer()
a__ = ClapProcessor(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 273 | 1 |
def A ( lowercase__ : List[str]=2_8123 ) -> Union[str, Any]:
UpperCamelCase__ :Optional[Any] = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
UpperCamelCase__ :Optional[int] = set()
UpperCamelCase__ :Optional[Any] = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(lowercase__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution()) | 709 |
def solution(limit: int = 28123) -> int:
    """Return the sum of all integers in [1, limit] not expressible as the sum
    of two abundant numbers (Project Euler problem 23).

    Fixed here: the original added the ``limit`` argument to the abundant set
    instead of the current ``n``, and the script guard called an undefined
    ``solution`` because the function was named ``A``.
    """
    # Sieve of proper-divisor sums; every n >= 1 has the divisor 1.
    divisor_sums = [1] * (limit + 1)
    for small in range(2, int(limit**0.5) + 1):
        divisor_sums[small * small] += small
        for large in range(small + 1, limit // small + 1):
            divisor_sums[large * small] += large + small

    abundants = set()
    total = 0
    for n in range(1, limit + 1):
        if divisor_sums[n] > n:
            abundants.add(n)
        if not any((n - a) in abundants for a in abundants):
            total += n
    return total


if __name__ == "__main__":
    print(solution())
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests for the Flax Stable Diffusion ControlNet pipeline.

    NOTE(review): the mangled original assigned every intermediate to the same
    name (leaving ``controlnet_params``, ``prompts``, ``canny_image`` etc.
    unbound), used the nonexistent dtype ``jnp.bfloataa``, and gave all three
    methods one shared name so ``tearDown`` never ran. The bodies below restore
    a runnable call sequence; names follow the upstream diffusers test —
    verify against it before relying on exact variable roles.
    """

    def tearDown(self):
        # Free accelerator memory between the heavyweight pipeline runs.
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose_estimation(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 435 | '''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Flax Stable Diffusion ControlNet integration tests (duplicate copy).

    NOTE(review): same repairs as the sibling class above — the original bound
    every value to one mangled name, referenced ``jnp.bfloataa`` (nonexistent),
    and shared one method name across tearDown and both tests. Restored from
    the upstream diffusers test; verify exact variable roles there.
    """

    def tearDown(self):
        # Release accelerator memory between pipeline runs.
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose_estimation(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 435 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer of *model* for quantization checks.

    GPT-2 exposes its MLP input projection as ``c_fc``; the BLOOM-style models
    used elsewhere in this file expose ``dense_ah_to_h``. Renamed from the
    mangled ``_lowercase`` to match the call site further down the file; the
    bogus ``-> Dict`` annotation (it returns a module) was dropped.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wrap a linear module with a trainable low-rank adapter (LoRA).

    The adapter is a rank-``rank`` bottleneck whose up-projection is
    zero-initialised, so at construction time the layer computes exactly the
    wrapped module's output. Fixes the mangled original: duplicate parameter
    names (a SyntaxError), a class name that did not match the
    ``LoRALayer(...)`` call sites below, and a forward pass named ``A_`` that
    ``nn.Module.__call__`` could never dispatch to.
    """

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        # Scaled-down normal init for the down-projection; zeros for the
        # up-projection so the adapter starts as an exact no-op.
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        # Frozen base output plus the trainable low-rank correction.
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
    """Base fixture for the bitsandbytes 4-bit quantization tests below.

    NOTE(review): identifiers here are machine-mangled. The repeated ``A__``
    class attributes can only keep the last assignment, yet subclasses read
    ``self.model_name`` / ``self.EXPECTED_RELATIVE_DIFFERENCE`` /
    ``self.input_text`` / ``self.EXPECTED_OUTPUTS`` / ``self.MAX_NEW_TOKENS``,
    and ``EXPECTED_OUTPUTS`` is used below before any binding under that name
    exists. Presumably those were the original attribute names — confirm
    against the upstream transformers bnb tests before fixing.
    """

    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    A__ : Any ="""bigscience/bloom-1b7"""

    # Constant values
    A__ : Union[str, Any] =2.109_6595_5269_2574
    A__ : List[str] ="""Hello my name is"""
    A__ : List[str] =set()
    EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
    EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
    EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
    A__ : Optional[Any] =1_0

    def A_ ( self : str ):
        # Models and tokenizer
        # NOTE(review): the result is discarded into a throwaway local;
        # presumably this was ``self.tokenizer = ...`` (subclasses read
        # ``self.tokenizer``) — verify.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(self.model_name )
class lowercase__ ( _UpperCAmelCase ):
    """4-bit quantization behaviour tests: config serialization, memory
    footprint, linear-layer dtypes, generation quality and conversion guards.

    NOTE(review): names are machine-mangled. Most ``SCREAMING_SNAKE_CASE__``
    assignments were presumably the bindings that later lines read under other
    names (``self.model_fpaa`` / ``self.model_abit`` / ``config`` /
    ``encoded_input`` / ``output_sequences`` ...), every method shares the name
    ``A_`` so only the last one survives class creation, and the base class
    ``_UpperCAmelCase`` is undefined in this file as shown. Flagged rather than
    rewritten because the original names cannot be recovered with confidence.
    """

    def A_ ( self : int ):
        super().setUp()

        # Models and tokenizer
        SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )

    def A_ ( self : int ):
        # Teardown: drop both models and reclaim GPU memory.
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()

    def A_ ( self : List[str] ):
        SCREAMING_SNAKE_CASE__ = self.model_abit.config

        self.assertTrue(hasattr(UpperCAmelCase_ , 'quantization_config' ) )

        SCREAMING_SNAKE_CASE__ = config.to_dict()
        SCREAMING_SNAKE_CASE__ = config.to_diff_dict()

        SCREAMING_SNAKE_CASE__ = config.to_json_string()

    def A_ ( self : Union[str, Any] ):
        from bitsandbytes.nn import Paramsabit

        SCREAMING_SNAKE_CASE__ = self.model_fpaa.get_memory_footprint()
        SCREAMING_SNAKE_CASE__ = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        SCREAMING_SNAKE_CASE__ = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )

    def A_ ( self : List[Any] ):
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCAmelCase_ , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )

    def A_ ( self : List[str] ):
        SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' )

        SCREAMING_SNAKE_CASE__ = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCAmelCase_ ) , self.EXPECTED_OUTPUTS )

    def A_ ( self : int ):
        SCREAMING_SNAKE_CASE__ = BitsAndBytesConfig()
        SCREAMING_SNAKE_CASE__ = True

        SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCAmelCase_ , device_map='auto' )

        SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' )

        SCREAMING_SNAKE_CASE__ = model_abit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCAmelCase_ ) , self.EXPECTED_OUTPUTS )

    def A_ ( self : str ):
        # Saving a 4-bit model is expected to raise.
        with self.assertRaises(UpperCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCAmelCase_ )

    def A_ ( self : Tuple ):
        SCREAMING_SNAKE_CASE__ = BitsAndBytesConfig()

        with self.assertRaises(UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCAmelCase_ , load_in_abit=UpperCAmelCase_ , device_map='auto' , bnb_abit_quant_type='nf4' , )

    def A_ ( self : str ):
        with self.assertRaises(UpperCAmelCase_ ):
            # Tries with `str`
            self.model_abit.to('cpu' )

        with self.assertRaises(UpperCAmelCase_ ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )

        with self.assertRaises(UpperCAmelCase_ ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )

        with self.assertRaises(UpperCAmelCase_ ):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(UpperCAmelCase_ ):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' )

        SCREAMING_SNAKE_CASE__ = self.model_fpaa.to(torch.floataa )
        SCREAMING_SNAKE_CASE__ = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )

        # Check this does not throw an error
        SCREAMING_SNAKE_CASE__ = self.model_fpaa.to('cpu' )

        # Check this does not throw an error
        SCREAMING_SNAKE_CASE__ = self.model_fpaa.half()

        # Check this does not throw an error
        SCREAMING_SNAKE_CASE__ = self.model_fpaa.float()

    def A_ ( self : int ):
        SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=UpperCAmelCase_ , device_map='auto' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
    """T5-specific 4-bit quantization tests (dense-relu-dense vs dense-act).

    NOTE(review): mangled names — the classmethod presumably set
    ``cls.model_name`` / ``cls.dense_act_model_name`` / ``cls.tokenizer`` /
    ``cls.input_text`` (those attributes are read below), and all methods share
    the name ``A_`` so only the last survives. Flagged rather than rewritten.
    """

    @classmethod
    def A_ ( cls : Dict ):
        SCREAMING_SNAKE_CASE__ = 't5-small'
        SCREAMING_SNAKE_CASE__ = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(cls.model_name )
        SCREAMING_SNAKE_CASE__ = 'Translate in German: Hello, my dog is cute'

    def A_ ( self : List[Any] ):
        # Reclaim GPU memory after each test.
        gc.collect()
        torch.cuda.empty_cache()

    def A_ ( self : int ):
        from transformers import TaForConditionalGeneration

        SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration._keep_in_fpaa_modules
        SCREAMING_SNAKE_CASE__ = None

        # test with `t5-small`
        SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
        SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase_ )

        # test with `flan-t5-small`
        SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
        SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE__ = modules

    def A_ ( self : Union[str, Any] ):
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )

        SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase_ )

        # test with `flan-t5-small`
        SCREAMING_SNAKE_CASE__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
        SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase_ )
class lowercase__ ( _UpperCAmelCase ):
    """Checks 4-bit loading across model head types (base, sequence
    classification, causal LM, seq2seq): quantized blocks vs. fp heads.

    NOTE(review): mangled names — the setUp bindings were presumably
    ``self.model_name`` / ``self.seq_to_seq_name`` / ``self.base_model`` /
    ``self.sequence_model`` / ``self.model_abit`` / ``self.seq_to_seq_model``
    (all read below), and the three methods share the name ``A_``.
    """

    def A_ ( self : int ):
        super().setUp()
        # model_name
        SCREAMING_SNAKE_CASE__ = 'bigscience/bloom-560m'
        SCREAMING_SNAKE_CASE__ = 't5-small'

        # Different types of model

        SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
        # Sequence classification model
        SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
        # CausalLM model
        SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )
        # Seq2seq model
        SCREAMING_SNAKE_CASE__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCAmelCase_ , device_map='auto' )

    def A_ ( self : List[str] ):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def A_ ( self : int ):
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowercase__ ( _UpperCAmelCase ):
    """Runs the text-generation pipeline with a 4-bit model.

    NOTE(review): mangled names — the pipeline was presumably bound to
    ``self.pipe`` (read by tearDown and the test), and both non-setUp methods
    share the name ``A_``.
    """

    def A_ ( self : Optional[int] ):
        super().setUp()

    def A_ ( self : Union[str, Any] ):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def A_ ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE__ = pipeline(
            'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )

        # Real second forward pass
        SCREAMING_SNAKE_CASE__ = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowercase__ ( _UpperCAmelCase ):
    """Checks a balanced device map spreads a 4-bit model over two GPUs and
    that generation still works.

    NOTE(review): mangled names — ``model_parallel`` / ``encoded_input`` /
    ``output_parallel`` are read below but their bindings went to the
    throwaway ``SCREAMING_SNAKE_CASE__`` local.
    """

    def A_ ( self : Any ):
        super().setUp()

    def A_ ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCAmelCase_ , device_map='balanced' )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )

        # Check that inference pass works on the model
        SCREAMING_SNAKE_CASE__ = self.tokenizer(self.input_text , return_tensors='pt' )

        # Second real batch
        SCREAMING_SNAKE_CASE__ = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class lowercase__ ( _UpperCAmelCase ):
    """End-to-end check that LoRA adapters on a quantized OPT model receive
    gradients while the frozen base (and embeddings) do not.

    NOTE(review): mangled names throughout — the assignments to
    ``SCREAMING_SNAKE_CASE__`` presumably were ``self.model_name``,
    ``param.requires_grad = False``, ``param.data = ...``,
    ``module.q_proj/k_proj/v_proj = LoRALayer(...)``, ``batch`` and ``out``;
    ``isinstance(UpperCAmelCase_, UpperCAmelCase_)`` lost both arguments
    (presumably ``isinstance(module, LoRALayer)``). Flagged, not rewritten.
    """

    def A_ ( self : List[str] ):
        SCREAMING_SNAKE_CASE__ = 'facebook/opt-350m'
        super().setUp()

    def A_ ( self : Tuple ):
        if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
            return

        # Step 1: freeze all parameters
        SCREAMING_SNAKE_CASE__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCAmelCase_ )

        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )

        for param in model.parameters():
            SCREAMING_SNAKE_CASE__ = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                SCREAMING_SNAKE_CASE__ = param.data.to(torch.floataa )

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCAmelCase_ ) ):
                SCREAMING_SNAKE_CASE__ = LoRALayer(module.q_proj , rank=16 )
                SCREAMING_SNAKE_CASE__ = LoRALayer(module.k_proj , rank=16 )
                SCREAMING_SNAKE_CASE__ = LoRALayer(module.v_proj , rank=16 )

        # Step 3: dummy batch
        SCREAMING_SNAKE_CASE__ = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            SCREAMING_SNAKE_CASE__ = model.forward(**UpperCAmelCase_ )
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(UpperCAmelCase_ , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class lowercase__ ( _UpperCAmelCase ):
    """Same suite against gpt2-xl with its own expected memory ratio.

    NOTE(review): both attributes are assigned to the mangled name ``A__`` —
    presumably ``model_name`` and ``EXPECTED_RELATIVE_DIFFERENCE`` in the
    original; as written the second assignment overwrites the first.
    """

    A__ : List[Any] ="""gpt2-xl"""
    A__ : Optional[Any] =3.3191_8548_5415_2187
| 702 |
from collections import defaultdict
class AssignmentUsingBitmask:
    """Count the ways to assign N tasks to M persons so every person gets
    exactly one task they are able to perform (bitmask DP).

    Fixes the mangled original: duplicate ``__init__`` parameter names (a
    SyntaxError), attributes that were never set (everything went to a local),
    lost arguments to ``defaultdict``/``append``, and class/method names that
    did not match the ``AssignmentUsingBitmask(...).count_no_of_ways(...)``
    call in the ``__main__`` guard below.
    """

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table has dimension (2^M) x (N + 1); -1 marks "not computed yet".
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask has all M person-bits set: every person has been assigned.
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # If mask == self.final_mask, all persons already hold a task.
        if mask == self.final_mask:
            return 1
        # Not everyone is assigned and no tasks remain.
        if task_no > self.total_tasks:
            return 0
        # Memoized case.
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Ways when this task is left unassigned.
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # Assign this task to each still-free capable person and recurse.
        if task_no in self.task:
            for p in self.task[task_no]:
                # Person p already holds a task.
                if mask & (1 << p):
                    continue
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # Memoize before returning.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Invert person -> tasks into task -> persons.
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # Fill the DP table; the final answer corresponds to state (0, 1).
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    # Demo: 5 tasks, 3 persons; person i may take any task in task_performed[i].
    # Fixed: both values below were assigned to the mangled name ``__snake_case``
    # while the call reads ``total_tasks`` and ``task_performed``.
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 400 | 0 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Project Euler 71: numerator of the largest fraction p/q strictly less
    than ``numerator/denominator`` with q <= ``limit``.

    For each q the best candidate p is floor(q * numerator / denominator),
    decremented when the division is exact so p/q stays strictly below the
    target. Candidates are compared by cross-multiplication (no float error).

    Fixes the mangled original: duplicate parameter names (a SyntaxError),
    locals bound to a throwaway name while later lines read the real names,
    and the ``__main__`` guard calling an undefined ``solution``.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # p/q would equal the target exactly; step down to stay strictly below.
            current_numerator -= 1
        # current/current_den > max/max_den  <=>  cross-multiplied comparison.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
| 36 |
def UpperCAmelCase(x_points: list, y_points: list, xa: int) -> list:
    """Interpolate y(xa) with Neville's iterated-interpolation algorithm.

    Returns ``[value, table]`` where ``value`` is the interpolated value at
    ``xa`` and ``table`` is the full Neville tableau used to compute it.

    Fixes the mangled original: all three parameters shared one name (a
    SyntaxError), the tableau assignments were discarded into a throwaway
    local, and the inner loop bounds were lost.
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    # Column 1 of the tableau holds the raw sample values.
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            # Combine two interpolants of order i-1 into one of order i.
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 393 | 0 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config whose depth multiplier and image size are
    parsed from a checkpoint name such as ``mobilenet_v1_1.0_224``.

    Raises ValueError for quantized checkpoints, which cannot be converted.
    Renamed from the mangled ``__lowerCAmelCase`` (three definitions shared
    that name) to match the call inside the conversion function below.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.')

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    # NOTE(review): the id2label mapping below is restored from the upstream
    # conversion script; confirm the attribute names against that script.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    """Download the standard COCO test image used to sanity-check conversions.

    Renamed from the mangled ``__lowerCAmelCase`` to match the call in the
    conversion function below; ``stream=True`` restores the lost argument.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy a TensorFlow MobileNetV1 checkpoint into a HF model + processor.

    Verifies logits on a test image for the known checkpoints before saving
    (and optionally pushing) the converted weights. Renamed to match the call
    in the ``__main__`` guard; all locals are restored from the mangled
    single-name assignments.
    """
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size},
        size={'shortest_edge': config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI wrapper around convert_movilevit_checkpoint.
    # Fixed: the parser and parsed args were bound to the mangled name
    # ``UpperCAmelCase`` while subsequent lines read ``parser`` / ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    """dataclasses.field factory for list-valued CLI arguments.

    Renamed from the mangled ``__lowerCAmelCase`` to match the reference
    inside the plot-arguments dataclass below; ``metadata`` restores the
    argument lost when both parameters shared one name.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class lowerCAmelCase_:
    """Command-line arguments for plotting benchmark csv output.

    NOTE(review): the original block repeated the single mangled attribute
    name ``__snake_case`` with no annotations, so no usable fields existed.
    The field names below are restored from how ``self.args.<name>`` is read
    by the plotting helper later in this file — verify against the upstream
    benchmark plotting script.
    """

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None,
        metadata={"help": "List of model names that are used instead of the ones in the csv file."},
    )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Optional[Any]:
"""simple docstring"""
try:
int(SCREAMING_SNAKE_CASE )
return True
except ValueError:
return False
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Tuple:
"""simple docstring"""
try:
float(SCREAMING_SNAKE_CASE )
return True
except ValueError:
return False
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
snake_case_ = args
snake_case_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
snake_case_ = csv.DictReader(_UpperCAmelCase )
for row in reader:
snake_case_ = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
snake_case_ = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
snake_case_ = float(row['''result'''] )
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = plt.subplots()
snake_case_ = '''Time usage''' if self.args.is_time else '''Memory usage'''
snake_case_ = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
snake_case_ = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
snake_case_ = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
snake_case_ = self.result_dict[model_name]['''result''']
((snake_case_) , (snake_case_)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
snake_case_ = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
snake_case_ = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_UpperCAmelCase , )
else:
snake_case_ = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((snake_case_) , (snake_case_)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
snake_case_ = np.asarray(_UpperCAmelCase , _UpperCAmelCase )[: len(_UpperCAmelCase )]
plt.scatter(
_UpperCAmelCase , _UpperCAmelCase , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(_UpperCAmelCase , _UpperCAmelCase , '''--''' )
title_str += F''' {label_model_name} vs.'''
snake_case_ = title_str[:-4]
snake_case_ = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_UpperCAmelCase )
plt.xlabel(_UpperCAmelCase )
plt.ylabel(_UpperCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __lowerCAmelCase ()-> int:
"""simple docstring"""
snake_case_ = HfArgumentParser(SCREAMING_SNAKE_CASE )
snake_case_ = parser.parse_args_into_dataclasses()[0]
snake_case_ = Plot(args=SCREAMING_SNAKE_CASE )
plot.plot()
if __name__ == "__main__":
main() | 531 | 0 |
# flake8: noqa
# Lint as: python3

# Public API of this utils subpackage. Binding the list to ``__all__`` (rather
# than a throwaway name) is what actually restricts ``from <pkg> import *`` to
# the names listed below.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 101 |
# Memo for _calculate, keyed by (days_remaining, total_absences, consecutive_lates).
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the valid attendance strings over the remaining *days*.

    A string becomes invalid as soon as it contains three consecutive "late"
    days or a second "absent" day (Project Euler 191, with the two day-type
    names swapped relative to the problem statement — the count is identical).

    :param days: number of days still to assign
    :param absent: total absences accumulated so far
    :param late: length of the current consecutive-late streak
    :return: number of prize strings reachable from this state
    """
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # Check the memo first: the result only depends on this (days, absent, late)
    # state, so it can be reused across branches of the recursion.
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # Three possible ways the string can continue from this state:
    # 1) late today (but not absent): the late streak grows, absences unchanged
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) absent today: absences grow, the late streak resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) on time today: the late streak resets, absences unchanged
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of prize strings over a *days*-day period (default 30)."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
| 12 | 0 |
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
# Module-level logger for this file.
# NOTE(review): the next assignment rebinds the SAME obfuscated name, so the
# logger created here is immediately shadowed and unusable — the two constants
# originally had distinct names (presumably a logger and a docstring constant).
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
# Shared `Args:/Return:` docstring injected into each processor's __call__ via
# `add_start_docstrings` (the decorators below reference it through a corrupted name).
SCREAMING_SNAKE_CASE__ = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class a_ :
    """Abstract logits-processor base: subclasses transform (input_ids, scores) into new scores.

    NOTE(review): identifiers in this module look machine-obfuscated — many
    classes are all named ``a_`` (each definition shadows the previous one) and
    ``a_`` in the decorator below is undefined at this point. Restore the
    original names before use.
    """

    @add_start_docstrings(a_ )
    # NOTE(review): declaring `_SCREAMING_SNAKE_CASE` twice is a SyntaxError —
    # the original parameter names (presumably input_ids, scores) were lost.
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
        """Always raises: only concrete subclasses are callable."""
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class a_ :
    """Abstract logits-warper base, same contract as the processor base above.

    NOTE(review): obfuscated name (`a_`) shadows the previous class and the
    duplicated `_SCREAMING_SNAKE_CASE` parameter below is a SyntaxError —
    original identifiers were lost.
    """

    @add_start_docstrings(a_ )
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
        """Always raises: only concrete subclasses are callable."""
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class a_ ( __lowerCamelCase ):
    """Applies a sequence of processors in order (``for processor in self`` implies a list subclass).

    NOTE(review): the base name ``__lowerCamelCase`` is undefined at module
    scope and locals like ``function_args``/``scores`` are referenced but the
    assignments all target one obfuscated name — runtime behavior cannot be
    recovered without the original source.
    """

    @add_start_docstrings(a_ )
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Run each processor; processors taking extra kwargs must receive all of them."""
        for processor in self:
            UpperCamelCase = inspect.signature(processor.__call__ ).parameters
            # Processors with more than the standard 3 positional params require
            # every extra parameter to be supplied through **kwargs.
            if len(a_ ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F"Make sure that all the required parameters: {list(function_args.keys() )} for "
                        F"{processor.__class__} are passed to the logits processor." )
                UpperCamelCase = processor(a_ , a_ , a_ , **a_ )
            else:
                UpperCamelCase = processor(a_ , a_ , a_ )
        return scores
class a_ ( __lowerCamelCase ):
    """Temperature warper: divides the scores by a strictly positive temperature.

    NOTE(review): obfuscated — duplicated parameter names (SyntaxError) and the
    validated local ``temperature`` is never bound; confirm against the original.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE ) -> Dict:
        """Validate and store the temperature (must be a float > 0)."""
        if not isinstance(a_ , a_ ) or not (temperature > 0):
            raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}" )
        UpperCamelCase = temperature
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
        """Scale the scores by 1/temperature and return them."""
        UpperCamelCase = scores / self.temperature
        return scores
class a_ ( __lowerCamelCase ):
    """Top-p (nucleus) warper: masks tokens outside the smallest set whose cumulative probability exceeds top_p.

    NOTE(review): obfuscated identifiers — duplicated `_SCREAMING_SNAKE_CASE`
    parameters (SyntaxError) and unbound locals (`top_p`, `filter_value`,
    `min_tokens_to_keep`, `score_mask`, ...); restore original names before use.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -float("""Inf""" ) , _SCREAMING_SNAKE_CASE = 1 ) -> Tuple:
        """Validate and store top_p (0..1), the filter value, and min_tokens_to_keep (>= 1)."""
        if not isinstance(a_ , a_ ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
        if not isinstance(a_ , a_ ) or (min_tokens_to_keep < 1):
            raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
        UpperCamelCase = top_p
        UpperCamelCase = filter_value
        UpperCamelCase = min_tokens_to_keep
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
        """Sort scores, keep the nucleus (plus min_tokens_to_keep), mask the rest with filter_value."""
        UpperCamelCase = lax.top_k(a_ , scores.shape[-1] )
        UpperCamelCase = jnp.full_like(a_ , self.filter_value )
        UpperCamelCase = jax.nn.softmax(a_ , axis=-1 ).cumsum(axis=-1 )
        UpperCamelCase = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        UpperCamelCase = jnp.roll(a_ , 1 )
        score_mask |= score_mask.at[:, 0].set(a_ )
        # min tokens to keep
        UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(a_ )
        UpperCamelCase = jnp.where(a_ , a_ , a_ )
        # Undo the sort so scores return to their original vocabulary order.
        UpperCamelCase = jax.lax.sort_key_val(a_ , a_ )[-1]
        return next_scores
class a_ ( __lowerCamelCase ):
    """Top-k warper: keeps only the k highest-scoring tokens per row, masking the rest.

    NOTE(review): obfuscated — duplicated parameters (SyntaxError), unbound
    locals, and ``jnp.intaa`` looks like a mangled dtype name (int32/int64);
    confirm against the original source.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -float("""Inf""" ) , _SCREAMING_SNAKE_CASE = 1 ) -> List[Any]:
        """Validate and store top_k (strictly positive) and the filter value."""
        if not isinstance(a_ , a_ ) or top_k <= 0:
            raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}" )
        UpperCamelCase = max(a_ , a_ )
        UpperCamelCase = filter_value
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
        """Scatter the top-k scores into a filter_value-filled tensor via flat indexing."""
        UpperCamelCase = scores.shape
        UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value )
        UpperCamelCase = min(self.top_k , scores.shape[-1] )  # Safety check
        UpperCamelCase = lax.top_k(a_ , a_ )
        # Per-row offsets so flattened top-k indices land in the right row.
        UpperCamelCase = jnp.broadcast_to((jnp.arange(a_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        UpperCamelCase = topk_scores.flatten()
        UpperCamelCase = topk_indices.flatten() + shift
        UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(a_ )
        UpperCamelCase = next_scores_flat.reshape(a_ , a_ )
        return next_scores
class a_ ( __lowerCamelCase ):
    """Forces the BOS token as the very first generated token (cur_len == 1).

    NOTE(review): obfuscated — duplicated parameters (SyntaxError) and unbound
    locals; the `1 - jnp.bool_(cur_len - 1)` trick is an XLA-friendly
    "cur_len == 1" test.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE ) -> Any:
        """Store the BOS token id to force."""
        UpperCamelCase = bos_token_id
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """At the first step, return -inf everywhere except the BOS token (set to 0)."""
        UpperCamelCase = jnp.full(scores.shape , -float("""inf""" ) )
        UpperCamelCase = 1 - jnp.bool_(cur_len - 1 )
        UpperCamelCase = jnp.where(a_ , new_scores.at[:, self.bos_token_id].set(0 ) , a_ )
        return scores
class a_ ( __lowerCamelCase ):
    """Forces the EOS token when generation reaches max_length - 1.

    NOTE(review): obfuscated — duplicated parameters (SyntaxError) and unbound
    locals; mirrors the forced-BOS processor above but triggers at the end.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Store max_length and the EOS token id to force."""
        UpperCamelCase = max_length
        UpperCamelCase = eos_token_id
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
        """At the last step, return -inf everywhere except the EOS token (set to 0)."""
        UpperCamelCase = jnp.full(scores.shape , -float("""inf""" ) )
        UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        UpperCamelCase = jnp.where(a_ , new_scores.at[:, self.eos_token_id].set(0 ) , a_ )
        return scores
class a_ ( __lowerCamelCase ):
    """Suppresses EOS until at least min_length tokens have been generated.

    NOTE(review): obfuscated — duplicated parameters (SyntaxError) and unbound
    validated locals (`min_length`, `eos_token_id`).
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
        """Validate and store min_length (>= 0) and eos_token_id (>= 0)."""
        if not isinstance(a_ , a_ ) or min_length < 0:
            raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}" )
        if not isinstance(a_ , a_ ) or eos_token_id < 0:
            raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
        UpperCamelCase = min_length
        UpperCamelCase = eos_token_id
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
        """While cur_len < min_length, set the EOS score to -inf (XLA-friendly clip test)."""
        UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        UpperCamelCase = jnp.where(a_ , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , a_ )
        return scores
class a_ ( __lowerCamelCase ):
    """Suppresses a fixed token set while generation is still at begin_index.

    NOTE(review): obfuscated — duplicated parameters (SyntaxError) and unbound
    locals (`begin_index`, `begin_suppress_tokens`).
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
        """Store the list of token ids to suppress and the step index at which to apply it."""
        UpperCamelCase = list(a_ )
        UpperCamelCase = begin_index
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
        """When cur_len == begin_index, set the suppressed tokens' scores to -inf."""
        UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index )
        UpperCamelCase = jnp.where(a_ , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , a_ )
        return scores
class a_ ( __lowerCamelCase ):
    """Unconditionally suppresses a fixed set of token ids at every step.

    NOTE(review): obfuscated — duplicated parameters (SyntaxError) and the
    stored attribute `suppress_tokens` is never actually bound here.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE ) -> str:
        """Store the list of token ids to suppress."""
        UpperCamelCase = list(a_ )
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
        """Set the suppressed tokens' scores to -inf and return the scores."""
        UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
        return scores
class a_ ( __lowerCamelCase ):
    """Forces specific tokens at specific generation indices (XLA-compatible lookup array).

    NOTE(review): obfuscated — duplicated parameters (SyntaxError), unbound
    locals, and ``jnp.intaa`` looks like a mangled dtype name (int32/int64);
    confirm against the original source.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Build the index->token lookup array from a {index: token} mapping."""
        UpperCamelCase = dict(a_ )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        UpperCamelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                UpperCamelCase = force_token_array.at[index].set(a_ )
        UpperCamelCase = jnp.intaa(a_ )
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """If a token is forced at cur_len, return -inf scores with 0 at that token; else pass through."""
        def _force_token(_SCREAMING_SNAKE_CASE ):
            UpperCamelCase = scores.shape[0]
            UpperCamelCase = self.force_token_array[generation_idx]
            UpperCamelCase = jnp.ones_like(a_ , dtype=scores.dtype ) * -float("""inf""" )
            UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            # Write a 0 column at the forced token position inside the -inf tensor.
            UpperCamelCase = lax.dynamic_update_slice(a_ , a_ , (0, current_token) )
            return new_scores
        # lax.cond keeps both branches traceable: out-of-range or unforced
        # indices (negative entries) leave the scores unchanged.
        UpperCamelCase = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(a_ ) , lambda: scores , ) , )
        return scores
class a_ ( __lowerCamelCase ):
    """Whisper-style timestamp rules: constrains when timestamp tokens may follow text tokens.

    NOTE(review): heavily obfuscated — the inner helpers declare
    `_SCREAMING_SNAKE_CASE` twice (SyntaxError), `a_` is used for nearly every
    argument, and most referenced locals are never bound. The visible structure
    (no_timestamps suppression, pairwise timestamp checks, initial-timestamp
    cap, log-softmax mass comparison) cannot be restored without the original.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Derive eos/no-timestamp/timestamp-begin ids from generate_config and set begin_index."""
        UpperCamelCase = generate_config.eos_token_id
        UpperCamelCase = generate_config.no_timestamps_token_id
        UpperCamelCase = generate_config.no_timestamps_token_id + 1
        UpperCamelCase = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(a_ , """max_initial_timestamp_index""" ):
            UpperCamelCase = generate_config.max_initial_timestamp_index
        else:
            UpperCamelCase = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            UpperCamelCase = model_config.vocab_size
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Apply the timestamp constraints to a batch of score rows (vmapped helpers)."""
        # The plain no-timestamps token is never allowed.
        UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
        def handle_pairs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , a_ , a_ )
            UpperCamelCase = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , a_ , )
            UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , a_ , a_ )
            UpperCamelCase = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , a_ , a_ , )
            return jnp.where(
                a_ , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , a_ , )
        UpperCamelCase = jax.vmap(a_ )(a_ , a_ )
        UpperCamelCase = jnp.where(cur_len == self.begin_index , a_ , a_ )
        UpperCamelCase = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , a_ , )
        UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index
        # Cap the first timestamp: tokens above last_allowed are masked out.
        UpperCamelCase = jnp.where(
            a_ , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , a_ , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        UpperCamelCase = jax.nn.log_softmax(a_ , axis=-1 )
        def handle_cumulative_probs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , a_ , )
        UpperCamelCase = jax.vmap(a_ )(a_ , a_ )
        return scores
| 706 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a_ ( lowerCamelCase ):
    """Config tester for MobileNetV1: checks two config attributes exist.

    NOTE(review): obfuscated — base class ``lowerCamelCase`` is undefined
    (presumably ConfigTester), and ``_SCREAMING_SNAKE_CASE`` below is used as a
    value but never defined, so the assertions cannot run as written.
    """

    def A__ ( self ) -> int:
        """Instantiate the config and assert it exposes tf_padding and depth_multiplier."""
        UpperCamelCase = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """depth_multiplier""" ) )
class a_ :
    """MobileNetV1 model tester: builds configs/inputs and checks model output shapes.

    NOTE(review): obfuscated — every local is assigned to the same name
    ``UpperCamelCase`` (the original ``self.x = x`` bindings were lost), the
    constructor declares ``_SCREAMING_SNAKE_CASE`` repeatedly (SyntaxError),
    and all methods are named ``A__`` so later defs shadow earlier ones.
    """

    # Defaults visible in the (broken) signature: 13, 3, 32, 0.25, 8, True,
    # 1024, 32, "relu6", 0.1, 0.02, True, True, 10, None — presumably
    # batch_size, num_channels, image_size, depth_multiplier, min_depth,
    # tf_padding, last_hidden_size, output_stride, hidden_act,
    # classifier_dropout_prob, initializer_range, is_training/use_labels,
    # num_labels, scope. TODO confirm against the original test file.
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.2_5 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , ) -> List[str]:
        """Store the test hyperparameters (bindings lost to obfuscation)."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = num_channels
        UpperCamelCase = image_size
        UpperCamelCase = depth_multiplier
        UpperCamelCase = min_depth
        UpperCamelCase = tf_padding
        # Effective hidden size scales with the depth multiplier.
        UpperCamelCase = int(last_hidden_size * depth_multiplier )
        UpperCamelCase = output_stride
        UpperCamelCase = hidden_act
        UpperCamelCase = classifier_dropout_prob
        UpperCamelCase = use_labels
        UpperCamelCase = is_training
        UpperCamelCase = num_labels
        UpperCamelCase = initializer_range
        UpperCamelCase = scope
    def A__ ( self ) -> int:
        """Build random pixel values (and labels when use_labels) plus a config."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
            UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def A__ ( self ) -> Optional[Any]:
        """Return a MobileNetVaConfig built from the stored hyperparameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
        """Run the base model and check the last_hidden_state shape (stride-reduced spatial dims)."""
        UpperCamelCase = MobileNetVaModel(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Run the classification head and check the logits shape."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = MobileNetVaForImageClassification(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A__ ( self ) -> Any:
        """Prepare (config, inputs_dict) for the common test harness."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    """Main MobileNetV1 test suite (model tester + pipeline mixins + unittest).

    NOTE(review): obfuscated — the two ``lowerCamelCase`` bases are undefined
    (presumably ModelTesterMixin and PipelineTesterMixin), all class attributes
    are bound to the same name ``lowercase`` so only the last survives, all
    methods are named ``A__`` (later defs shadow earlier ones), and
    ``_SCREAMING_SNAKE_CASE`` is used as a value without being defined.
    """

    # NOTE(review): these six assignments all rebind `lowercase`; originally
    # distinct attributes (all_model_classes, pipeline_model_mapping, and four
    # boolean flags) — confirm against the original test file.
    lowercase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    lowercase = (
        {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False
    def A__ ( self ) -> int:
        """Set up the model tester and config tester for this suite."""
        UpperCamelCase = MobileNetVaModelTester(self )
        UpperCamelCase = MobileNetVaConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> Optional[Any]:
        """Run the common configuration tests."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
    def A__ ( self ) -> int:
        """Skipped: the architecture has no inputs_embeds."""
        pass
    @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
    def A__ ( self ) -> int:
        """Skipped: no input/output embeddings on this architecture."""
        pass
    @unittest.skip(reason="""MobileNetV1 does not output attentions""" )
    def A__ ( self ) -> Dict:
        """Skipped: the model does not produce attention maps."""
        pass
    def A__ ( self ) -> Union[str, Any]:
        """Check the forward signature starts with pixel_values for every model class."""
        UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
            UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase = [*signature.parameters.keys()]
            UpperCamelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> str:
        """Run the base-model shape check via the model tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> int:
        """Check the number of hidden states (expected 26) with and without the config flag."""
        # NOTE(review): the helper below declares `_SCREAMING_SNAKE_CASE` three
        # times, which is a SyntaxError — original parameter names were lost.
        def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            UpperCamelCase = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                UpperCamelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            UpperCamelCase = outputs.hidden_states
            UpperCamelCase = 26
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
        UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase = True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCamelCase = True
            check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> str:
        """Run the image-classification shape check via the model tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
    @slow
    def A__ ( self ) -> Dict:
        """Slow test: load the first pretrained checkpoint from the hub and assert it exists."""
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = MobileNetVaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    """Load the standard COCO cats test image used by the slow integration test.

    Named ``prepare_img`` to match the call site in the integration test; the
    previous version bound the image to one name and returned another
    (undefined) one, raising NameError at runtime.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
    """Slow integration test: run the pretrained MobileNetV1 classifier on a fixture image.

    NOTE(review): obfuscated — locals are all bound to ``UpperCamelCase`` and
    ``_SCREAMING_SNAKE_CASE`` is used as a value without being defined, so the
    test cannot run as written; the expected logits slice and (1, 1001) shape
    are preserved below.
    """

    @cached_property
    def A__ ( self ) -> Dict:
        """Return the pretrained image processor, or None without vision deps."""
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
        )
    @slow
    def A__ ( self ) -> Union[str, Any]:
        """Forward the fixture image and check logits shape and first three values."""
        UpperCamelCase = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.default_image_processor
        UpperCamelCase = prepare_img()
        UpperCamelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            UpperCamelCase = model(**_SCREAMING_SNAKE_CASE )
        # verify the logits
        UpperCamelCase = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 35 | 0 |
import operator
def strand_sort(arr, reverse=False, solution=None):
    """Sort *arr* with strand sort and return the sorted list.

    Repeatedly pulls an already-ordered "strand" off the front of *arr* and
    merges it into *solution*, then recurses on the remainder.

    :param arr: list to sort; NOTE — it is consumed (emptied) by the algorithm.
    :param reverse: sort in descending order when True.
    :param solution: accumulator used by the recursive calls; leave as None.
    :return: the sorted list.

    Named ``strand_sort`` (with parameters ``arr``/``reverse``/``solution``) to
    match the recursive call and the ``__main__`` asserts; the previous version
    declared the same parameter name three times, which is a SyntaxError.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # Extract one monotone strand from the front of arr. (Items are popped
    # while enumerating, so some elements are skipped — the recursion below
    # picks them up on later passes.)
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    # Recurse on whatever the strand pass left behind.
    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 326 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A (__UpperCAmelCase ,unittest.TestCase ):
    """Tokenizer test suite for MVP (slow + fast tokenizers).

    NOTE(review): obfuscated — the base ``__UpperCAmelCase`` is undefined
    (presumably TokenizerTesterMixin), the four class attributes below all
    rebind ``_SCREAMING_SNAKE_CASE`` so only the last survives, every method is
    named ``__a`` (later defs shadow earlier ones), and bodies reference
    ``lowercase_``/``kwargs``/``batch``/``targets``/``tokens_r``/``tokens_p``
    that are never bound. Byte-preserved; restore original names before use.
    """

    # NOTE(review): originally distinct attributes (tokenizer_class,
    # rust_tokenizer_class, test_rust_tokenizer flag, a pretrained filter) —
    # confirm against the original test file.
    _SCREAMING_SNAKE_CASE = MvpTokenizer
    _SCREAMING_SNAKE_CASE = MvpTokenizerFast
    _SCREAMING_SNAKE_CASE = True
    _SCREAMING_SNAKE_CASE = filter_roberta_detectors
    def __a ( self ) -> Union[str, Any]:
        """Write a tiny BPE vocab and merges file into the temp dir for the tests."""
        super().setUp()
        _snake_case : Union[str, Any] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        _snake_case : Tuple = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        _snake_case : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        _snake_case : str = {'''unk_token''': '''<unk>'''}
        _snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        _snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowercase_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowercase_ ) )
    def __a ( self , **lowercase_ ) -> List[Any]:
        """Build a slow tokenizer from the temp dir, merging in the special-token map."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
    def __a ( self , **lowercase_ ) -> Dict:
        """Build a fast (rust) tokenizer from the temp dir, merging in the special-token map."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
    def __a ( self , lowercase_ ) -> Any:
        """Return a (input, expected output) text pair for the common tests."""
        return "lower newer", "lower newer"
    @cached_property
    def __a ( self ) -> int:
        """Cached pretrained slow tokenizer (RUCAIBox/mvp)."""
        return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
    @cached_property
    def __a ( self ) -> str:
        """Cached pretrained fast tokenizer (RUCAIBox/mvp)."""
        return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
    @require_torch
    def __a ( self ) -> List[str]:
        """Batch-encode two paragraphs and check shapes and the first row of ids."""
        _snake_case : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        _snake_case : Optional[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _snake_case : Optional[int] = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors='''pt''' )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            _snake_case : int = batch.input_ids.tolist()[0]
            self.assertListEqual(lowercase_ , lowercase_ )
            # Test that special tokens are reset
    @require_torch
    def __a ( self ) -> Optional[int]:
        """Encoding without targets must yield input_ids/attention_mask but no labels."""
        _snake_case : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _snake_case : str = tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''' )
            # check if input_ids are returned and no labels
            self.assertIn('''input_ids''' , lowercase_ )
            self.assertIn('''attention_mask''' , lowercase_ )
            self.assertNotIn('''labels''' , lowercase_ )
            self.assertNotIn('''decoder_attention_mask''' , lowercase_ )
    @require_torch
    def __a ( self ) -> Union[str, Any]:
        """Target-only encoding with max_length padding must produce width-32 ids."""
        _snake_case : Tuple = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _snake_case : List[str] = tokenizer(text_target=lowercase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )
    @require_torch
    def __a ( self ) -> Tuple:
        """Very long input with truncation must be cut to the model max length (1024)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _snake_case : Union[str, Any] = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=lowercase_ , truncation=lowercase_ , return_tensors='''pt''' )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual(batch.input_ids.shape , (2, 1024) )
    @require_torch
    def __a ( self ) -> int:
        """Input + target encoding: both sequences must be wrapped in BOS/EOS."""
        _snake_case : Dict = ['''A long paragraph for summarization.''']
        _snake_case : List[str] = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _snake_case : Dict = tokenizer(lowercase_ , text_target=lowercase_ , return_tensors='''pt''' )
            _snake_case : List[Any] = inputs['''input_ids''']
            _snake_case : Dict = inputs['''labels''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    def __a ( self ) -> List[Any]:
        """Intentionally empty override of a common test."""
        pass
    def __a ( self ) -> List[Any]:
        """Compare slow vs fast tokenizers on a sentence containing a <mask> token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _snake_case : Dict = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                _snake_case : str = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                _snake_case : Optional[Any] = '''A, <mask> AllenNLP sentence.'''
                _snake_case : Optional[Any] = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                _snake_case : str = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                _snake_case : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                _snake_case : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    lowercase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    lowercase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 326 | 1 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of *sequence*.

    Fix: the function was defined under a mangled name but is called as
    ``generate_all_permutations`` at module level, so the original raised
    NameError. It delegates to the backtracking helper with an empty partial
    permutation and an all-zero "used" marker list.
    """
    create_state_space_tree(sequence, [], 0, [0 for _ in range(len(sequence))])
def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursively enumerate permutations of *sequence*, printing each one.

    Args:
        sequence: the items to permute.
        current_sequence: the partial permutation built so far (mutated in place).
        index: depth of the recursion == len(current_sequence).
        index_used: per-position flags marking which items are in use.

    Fixes: the function was defined under a mangled name but called as
    ``create_state_space_tree``, and the two marker updates were assigned to
    a throwaway variable (``a_ = True`` / ``a_ = False``) instead of
    ``index_used[i]`` — so nothing was ever marked used and the original
    printed n**n sequences with repeated elements instead of n! permutations.
    """
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True  # mark item i as taken for the subtree below
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False  # backtrack: release item i
# Demo: print all permutations of a numeric and a string sequence.
# Fix: the obfuscated source assigned the lists to `_A` but passed the
# undefined names `sequence` / `sequence_a` to the calls.
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the VAE `encode` method.

    Fix: the class and its field were mangled (`lowerCamelCase_` /
    `_lowerCamelCase`), yet the methods below construct it as
    `AutoencoderKLOutput(latent_dist=...)` — the names are restored to match
    those call sites, and the base class is the `BaseOutput` imported above.

    Attributes:
        latent_dist: encoded distribution over latents; call `.sample()` or
            `.mode()` on it to obtain a latent tensor.
    """

    latent_dist: "DiagonalGaussianDistribution"
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    """A KL-regularized variational autoencoder (VAE).

    Wraps an `Encoder`/`Decoder` pair plus quantization convolutions, and
    supports sliced encode/decode (batch split into size-1 slices) and tiled
    encode/decode (overlapping spatial tiles blended at the seams) so large
    images fit in memory.

    NOTE(review): identifiers in this block look machine-mangled and the code
    as written cannot run:
    - the base classes are presumably `ModelMixin, ConfigMixin` (both
      imported at the top of this file) — confirm;
    - `nn.Convad` is not a torch API; presumably `nn.Conv2d` — confirm;
    - `__init__` (and other methods) declare every parameter with the same
      name `_SCREAMING_SNAKE_CASE`, which is a SyntaxError, and the bodies
      read names such as `latent_channels` / `sample_size` / `use_tiling`
      that the mangled signatures no longer bind.
    Code below is byte-identical to the original; only comments were added.
    """

    # Mangled class attribute; TODO confirm intended name (likely a
    # capability flag such as `_supports_gradient_checkpointing`).
    _lowerCamelCase : Union[str, Any] = True
    @register_to_config
    def __init__( self , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = ("DownEncoderBlock2D",) , _SCREAMING_SNAKE_CASE = ("UpDecoderBlock2D",) , _SCREAMING_SNAKE_CASE = (64,) , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = "silu" , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 0.1_8_2_1_5 , ):
        super().__init__()
        # pass init params to Encoder
        a_ = Encoder(
            in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , down_block_types=_SCREAMING_SNAKE_CASE , block_out_channels=_SCREAMING_SNAKE_CASE , layers_per_block=_SCREAMING_SNAKE_CASE , act_fn=_SCREAMING_SNAKE_CASE , norm_num_groups=_SCREAMING_SNAKE_CASE , double_z=_SCREAMING_SNAKE_CASE , )
        # pass init params to Decoder
        a_ = Decoder(
            in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , up_block_types=_SCREAMING_SNAKE_CASE , block_out_channels=_SCREAMING_SNAKE_CASE , layers_per_block=_SCREAMING_SNAKE_CASE , norm_num_groups=_SCREAMING_SNAKE_CASE , act_fn=_SCREAMING_SNAKE_CASE , )
        # 1x1 convolutions mapping encoder moments <-> latent space.
        # NOTE(review): `nn.Convad` / `latent_channels` — see class note.
        a_ = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
        a_ = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )
        a_ = False
        a_ = False
        # only relevant if vae tiling is enabled
        a_ = self.config.sample_size
        a_ = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        # Latent-space tile size = sample tile size / total downsampling factor.
        a_ = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        a_ = 0.2_5
    # Gradient-checkpointing toggle: forwards `value` to Encoder/Decoder only.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
        if isinstance(_SCREAMING_SNAKE_CASE , (Encoder, Decoder) ):
            a_ = value
    # Enable (or set) tiled encode/decode.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE = True ):
        a_ = use_tiling
    # Disable tiled encode/decode.
    def __magic_name__ ( self ):
        self.enable_tiling(_SCREAMING_SNAKE_CASE )
    # Enable sliced (per-sample) encode/decode.
    def __magic_name__ ( self ):
        a_ = True
    # Disable sliced encode/decode.
    def __magic_name__ ( self ):
        a_ = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def __magic_name__ ( self ):
        # Walk the module tree and collect every attention processor, keyed by
        # its dotted module path.
        a_ = {}
        def fn_recursive_add_processors(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            if hasattr(_SCREAMING_SNAKE_CASE , """set_processor""" ):
                a_ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        return processors
    # Set a single processor (or a per-layer dict of processors) on every
    # attention module; a dict must cover all attention layers exactly.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE ):
        a_ = len(self.attn_processors.keys() )
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(_SCREAMING_SNAKE_CASE )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            if hasattr(_SCREAMING_SNAKE_CASE , """set_processor""" ):
                if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                    module.set_processor(_SCREAMING_SNAKE_CASE )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        for name, module in self.named_children():
            fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # Reset every attention module to the default AttnProcessor.
    def __magic_name__ ( self ):
        self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ):
        # Encode: dispatch to tiled path for oversized inputs, optionally
        # slice the batch, then map encoder moments to a Gaussian posterior.
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
        if self.use_slicing and x.shape[0] > 1:
            a_ = [self.encoder(_SCREAMING_SNAKE_CASE ) for x_slice in x.split(1 )]
            a_ = torch.cat(_SCREAMING_SNAKE_CASE )
        else:
            a_ = self.encoder(_SCREAMING_SNAKE_CASE )
        a_ = self.quant_conv(_SCREAMING_SNAKE_CASE )
        a_ = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE )
    # Internal single-batch decode (tiled path for oversized latents).
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
        a_ = self.post_quant_conv(_SCREAMING_SNAKE_CASE )
        a_ = self.decoder(_SCREAMING_SNAKE_CASE )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
    @apply_forward_hook
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ):
        # Public decode: optionally process the batch one sample at a time.
        if self.use_slicing and z.shape[0] > 1:
            a_ = [self._decode(_SCREAMING_SNAKE_CASE ).sample for z_slice in z.split(1 )]
            a_ = torch.cat(_SCREAMING_SNAKE_CASE )
        else:
            a_ = self._decode(_SCREAMING_SNAKE_CASE ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
    # Linearly blend tile `b` into tile `a` along the vertical (row) axis.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        a_ = min(a.shape[2] , b.shape[2] , _SCREAMING_SNAKE_CASE )
        for y in range(_SCREAMING_SNAKE_CASE ):
            a_ = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    # Linearly blend tile `b` into tile `a` along the horizontal (column) axis.
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        a_ = min(a.shape[3] , b.shape[3] , _SCREAMING_SNAKE_CASE )
        for x in range(_SCREAMING_SNAKE_CASE ):
            a_ = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ):
        # Tiled encode: encode overlapping tiles independently, then blend the
        # overlaps so the final latent has no visible seams.
        a_ = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        a_ = int(self.tile_latent_min_size * self.tile_overlap_factor )
        a_ = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        a_ = []
        for i in range(0 , x.shape[2] , _SCREAMING_SNAKE_CASE ):
            a_ = []
            for j in range(0 , x.shape[3] , _SCREAMING_SNAKE_CASE ):
                a_ = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                a_ = self.encoder(_SCREAMING_SNAKE_CASE )
                a_ = self.quant_conv(_SCREAMING_SNAKE_CASE )
                row.append(_SCREAMING_SNAKE_CASE )
            rows.append(_SCREAMING_SNAKE_CASE )
        a_ = []
        for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
            a_ = []
            for j, tile in enumerate(_SCREAMING_SNAKE_CASE ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    a_ = self.blend_v(rows[i - 1][j] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                if j > 0:
                    a_ = self.blend_h(row[j - 1] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE , dim=3 ) )
        a_ = torch.cat(_SCREAMING_SNAKE_CASE , dim=2 )
        a_ = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE )
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ):
        # Tiled decode: mirror of tiled_encode, operating on latent tiles.
        a_ = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        a_ = int(self.tile_sample_min_size * self.tile_overlap_factor )
        a_ = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        a_ = []
        for i in range(0 , z.shape[2] , _SCREAMING_SNAKE_CASE ):
            a_ = []
            for j in range(0 , z.shape[3] , _SCREAMING_SNAKE_CASE ):
                a_ = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                a_ = self.post_quant_conv(_SCREAMING_SNAKE_CASE )
                a_ = self.decoder(_SCREAMING_SNAKE_CASE )
                row.append(_SCREAMING_SNAKE_CASE )
            rows.append(_SCREAMING_SNAKE_CASE )
        a_ = []
        for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
            a_ = []
            for j, tile in enumerate(_SCREAMING_SNAKE_CASE ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    a_ = self.blend_v(rows[i - 1][j] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                if j > 0:
                    a_ = self.blend_h(row[j - 1] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE , dim=3 ) )
        a_ = torch.cat(_SCREAMING_SNAKE_CASE , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
    def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , ):
        # Full forward pass: encode to a posterior, draw (sample or mode) a
        # latent, then decode it back to sample space.
        a_ = sample
        a_ = self.encode(_SCREAMING_SNAKE_CASE ).latent_dist
        if sample_posterior:
            a_ = posterior.sample(generator=_SCREAMING_SNAKE_CASE )
        else:
            a_ = posterior.mode()
        a_ = self.decode(_SCREAMING_SNAKE_CASE ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class __lowerCAmelCase ( unittest.TestCase ):
    '''Unit tests for the greedy-knapsack helper `kp.calc_profit`.

    NOTE(review): this test class appears machine-mangled. In every
    `assertRaisesRegex` call below, the first argument `_a` is not defined in
    the method scope (it is only the mangled *name of the method itself* at
    class level), and the calls are missing both the expected exception class
    and the callable-with-arguments that should trigger it — so these tests
    cannot pass as written. Reconstructing the intended
    `kp.calc_profit(...)` invocations is not possible from this view; the
    code is left byte-identical.
    '''
    def _a ( self : Union[str, Any] ):
        # Happy path: capacity 100 over these profit/weight pairs yields 210.
        '''simple docstring'''
        A_ : str = [10, 20, 30, 40, 50, 60]
        A_ : int = [2, 4, 6, 8, 10, 12]
        A_ : Tuple = 100
        self.assertEqual(kp.calc_profit(_a ,_a ,_a ) ,210 )
    def _a ( self : List[str] ):
        # Should reject a zero/negative capacity (see class NOTE: call is broken).
        '''simple docstring'''
        self.assertRaisesRegex(_a ,"""max_weight must greater than zero.""" )
    def _a ( self : List[str] ):
        # Should reject negative weights (see class NOTE: call is broken).
        '''simple docstring'''
        self.assertRaisesRegex(_a ,"""Weight can not be negative.""" )
    def _a ( self : Dict ):
        # Should reject negative profits (see class NOTE: call is broken).
        '''simple docstring'''
        self.assertRaisesRegex(_a ,"""Profit can not be negative.""" )
    def _a ( self : Tuple ):
        # Duplicate capacity check (see class NOTE: call is broken).
        '''simple docstring'''
        self.assertRaisesRegex(_a ,"""max_weight must greater than zero.""" )
    def _a ( self : Union[str, Any] ):
        # Should reject mismatched profit/weight list lengths (call is broken).
        '''simple docstring'''
        self.assertRaisesRegex(
            _a ,"""The length of profit and weight must be same.""" )
# Allow running this test module directly: `python <module>.py`.
if __name__ == "__main__":
    unittest.main()
| 665 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
    '''Test helper that builds a tiny random `LlamaConfig` plus input tensors
    and runs shape checks for the Llama model tests below (the standard
    HF `...ModelTester` pattern).

    NOTE(review): identifiers are machine-mangled. Results are assigned to
    the placeholder `A_` but later read under their original names (e.g.
    `result`, `outputs`, `past_key_values`), and the annotated tuple
    unpacking in the last method (`( (A_) , ... ) : Any = ...`) is not valid
    Python — this class cannot run as written. Code left byte-identical;
    only comments/docstrings changed.
    '''
    def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
        # Store all model/config hyper-parameters for the checks below.
        '''simple docstring'''
        A_ : List[str] = parent
        A_ : Any = batch_size
        A_ : Tuple = seq_length
        A_ : List[str] = is_training
        A_ : Tuple = use_input_mask
        A_ : Dict = use_token_type_ids
        A_ : List[Any] = use_labels
        A_ : Union[str, Any] = vocab_size
        A_ : Any = hidden_size
        A_ : str = num_hidden_layers
        A_ : Optional[Any] = num_attention_heads
        A_ : str = intermediate_size
        A_ : Tuple = hidden_act
        A_ : Any = hidden_dropout_prob
        A_ : Any = attention_probs_dropout_prob
        A_ : List[str] = max_position_embeddings
        A_ : int = type_vocab_size
        A_ : Union[str, Any] = type_sequence_label_size
        A_ : Any = initializer_range
        A_ : List[Any] = num_labels
        A_ : Optional[Any] = num_choices
        A_ : List[Any] = scope
    def _a ( self : Optional[int] ):
        # Build random input ids / masks / label tensors plus a config.
        '''simple docstring'''
        A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        A_ : int = None
        if self.use_input_mask:
            A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        A_ : Dict = None
        if self.use_token_type_ids:
            A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        A_ : str = None
        A_ : Any = None
        A_ : str = None
        if self.use_labels:
            A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
        A_ : str = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _a ( self : Optional[Any] ):
        # Construct a LlamaConfig from the stored hyper-parameters.
        '''simple docstring'''
        return LlamaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
    def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
        # Forward pass through the bare LlamaModel; check hidden-state shape.
        '''simple docstring'''
        A_ : Any = LlamaModel(config=_a )
        model.to(_a )
        model.eval()
        A_ : Optional[Any] = model(_a ,attention_mask=_a )
        A_ : Optional[int] = model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
        # Decoder-mode forward pass with encoder hidden states / mask.
        '''simple docstring'''
        A_ : List[str] = True
        A_ : Union[str, Any] = LlamaModel(_a )
        model.to(_a )
        model.eval()
        A_ : Tuple = model(
            _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
        A_ : List[Any] = model(
            _a ,attention_mask=_a ,encoder_hidden_states=_a ,)
        A_ : int = model(_a ,attention_mask=_a )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
        # Causal-LM forward pass with labels; check logits shape.
        '''simple docstring'''
        A_ : List[Any] = LlamaForCausalLM(config=_a )
        model.to(_a )
        model.eval()
        A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
        # Check that decoding with cached past_key_values matches a full
        # forward pass on the concatenated sequence (sliced comparison).
        '''simple docstring'''
        A_ : Optional[Any] = True
        A_ : Any = True
        A_ : Tuple = LlamaForCausalLM(config=_a )
        model.to(_a )
        model.eval()
        # first forward pass
        A_ : Optional[int] = model(
            _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
        A_ : Tuple = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and
        A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
        A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
        A_ : List[str] = model(
            _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
        A_ : Any = model(
            _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
        # select random slice
        A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
        A_ : int = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
    def _a ( self : Optional[Any] ):
        # Repack prepare_config_and_inputs() into (config, inputs_dict).
        # NOTE(review): the annotated tuple target below is invalid syntax —
        # the original unpacked into seven distinct names; confirm upstream.
        '''simple docstring'''
        A_ : int = self.prepare_config_and_inputs()
        (
            (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) , (
                A_
            ) ,
        ) : Any = config_and_inputs
        A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Model tests for Llama (standard HF mixin-based test class: common model
    tests, generation tests, pipeline tests).

    NOTE(review): identifiers are machine-mangled: class-level `a_ = ...`
    attributes shadow each other (the originals were distinct names such as
    `all_model_classes` / `pipeline_model_mapping` / `test_headmasking`), and
    method bodies assign to `A_` but read back the original names
    (`self.model_tester`, `self.config_tester`, `result`, ...). Code left
    byte-identical; only comments/docstrings changed.
    '''
    a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    a_ = (LlamaForCausalLM,) if is_torch_available() else ()
    a_ = (
        {
            """feature-extraction""": LlamaModel,
            """text-classification""": LlamaForSequenceClassification,
            """text-generation""": LlamaForCausalLM,
            """zero-shot""": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a_ = False
    a_ = False
    def _a ( self : List[Any] ):
        # Set up the shared model tester and a ConfigTester for LlamaConfig.
        '''simple docstring'''
        A_ : Union[str, Any] = LlamaModelTester(self )
        A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
    def _a ( self : Dict ):
        # Run the generic config sanity checks.
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def _a ( self : Optional[Any] ):
        # Basic model forward-shape test.
        '''simple docstring'''
        A_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )
    def _a ( self : Optional[Any] ):
        # Same test under each position-embedding type.
        '''simple docstring'''
        A_ : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            A_ : Dict = type
            self.model_tester.create_and_check_model(*_a )
    def _a ( self : List[Any] ):
        # Sequence classification head, regression-style labels.
        '''simple docstring'''
        A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : List[str] = 3
        A_ : Any = input_dict["""input_ids"""]
        A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
        A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        A_ : List[Any] = LlamaForSequenceClassification(_a )
        model.to(_a )
        model.eval()
        A_ : int = model(_a ,attention_mask=_a ,labels=_a )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def _a ( self : Dict ):
        # Sequence classification head, single-label problem type.
        '''simple docstring'''
        A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : str = 3
        A_ : Union[str, Any] = """single_label_classification"""
        A_ : Union[str, Any] = input_dict["""input_ids"""]
        A_ : List[Any] = input_ids.ne(1 ).to(_a )
        A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        A_ : List[Any] = LlamaForSequenceClassification(_a )
        model.to(_a )
        model.eval()
        A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def _a ( self : Optional[Any] ):
        # Sequence classification head, multi-label problem type.
        '''simple docstring'''
        A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : Dict = 3
        A_ : Dict = """multi_label_classification"""
        A_ : Any = input_dict["""input_ids"""]
        A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
        A_ : List[str] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
        A_ : Optional[int] = LlamaForSequenceClassification(_a )
        model.to(_a )
        model.eval()
        A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
    def _a ( self : Any ):
        # Deliberately skipped (see skip reason above).
        '''simple docstring'''
        pass
    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def _a ( self : Optional[Any] ,_a : List[Any] ):
        # RoPE scaling: short inputs must match the unscaled model only for
        # "dynamic" scaling; long inputs must always differ.
        '''simple docstring'''
        A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
        A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
        set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        A_ : int = LlamaModel(_a )
        original_model.to(_a )
        original_model.eval()
        A_ : Tuple = original_model(_a ).last_hidden_state
        A_ : Union[str, Any] = original_model(_a ).last_hidden_state
        set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
        A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
        A_ : int = LlamaModel(_a )
        scaled_model.to(_a )
        scaled_model.eval()
        A_ : List[Any] = scaled_model(_a ).last_hidden_state
        A_ : Any = scaled_model(_a ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    '''Slow integration tests comparing real Llama-2 checkpoints against
    hard-coded reference logits, plus one greedy-generation test. All tests
    are currently skipped (see the skip decorators).

    NOTE(review): identifiers are machine-mangled — values are assigned to
    `A_` but read back as `out`, `model`, `tokenizer`, etc., so these bodies
    cannot run as written. Code left byte-identical; only comments/docstrings
    changed.
    '''
    @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
    @slow
    def _a ( self : Tuple ):
        # Llama-2-7b: compare per-position logit means and a logits slice.
        '''simple docstring'''
        A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
        A_ : str = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
    @slow
    def _a ( self : str ):
        # Llama-2-13b: same logits comparisons as above.
        '''simple docstring'''
        A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
        A_ : Tuple = model(torch.tensor(_a ) )
        # Expected mean on dim = -1
        A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
    @slow
    def _a ( self : Union[str, Any] ):
        # Llama-2-13b-chat: same logits comparisons.
        '''simple docstring'''
        A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
        A_ : int = model(torch.tensor(_a ) )
        # Expected mean on dim = -1
        A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
    @unittest.skip(
        """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
    @slow
    def _a ( self : Optional[Any] ):
        # Llama-2-70b: logits comparisons (marked too slow).
        '''simple docstring'''
        A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
        A_ : Tuple = model(torch.tensor(_a ) )
        A_ : Dict = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
        # fmt: off
        A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip("""Model is curently gated""" )
    @slow
    def _a ( self : Tuple ):
        # Greedy generation with Llama-2-13b-chat must reproduce the
        # reference continuation EXPECTED_TEXT_COMPLETION.
        '''simple docstring'''
        A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
        A_ : List[str] = """Simply put, the theory of relativity states that """
        A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
        A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
        A_ : List[str] = LlamaForCausalLM.from_pretrained(
            """meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
        # greedy generation outputs
        A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
        A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
        self.assertEqual(_a ,_a )
| 665 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Which seq2seq answer generator to use; compared against "bart" below.
MODEL_TYPE = "bart"
# Whether to load the dense retrieval models/indexes (needs a GPU + faiss).
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question embedder (RetriBERT) and the answer generator.

    Returns a 4-tuple ``(qar_tokenizer, qar_model, sas_tokenizer, sas_model)``
    matching the module-level unpacking of ``load_models()`` below.

    Fix: the obfuscated source defined this under a mangled name, assigned
    every model to `lowercase`, then read back the original names
    (`qar_model`, `sas_model`, `save_dict`) — all restored here.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passage dataset, its dense faiss index (on GPU), and
    an Elasticsearch client.

    Returns a 3-tuple ``(passages, gpu_index, es_client)`` matching the
    module-level unpacking of ``load_indexes()`` below.

    Fix: the obfuscated source assigned intermediates to `lowercase` but read
    them back under the original names (`wikiaab_passages`,
    `wikiaab_gpu_index_flat`) — restored with readable names.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r",
            shape=(wiki40b_passages.num_rows, 128),)
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a dense faiss index over its
    question representations.

    Returns a 2-tuple ``(eli5_train, eli5_train_q_index)`` matching the
    module-level unpacking of ``load_train_data()`` below.

    Fix: restored the mangled function name and the `lowercase` assignments
    that were read back as `elia`, `elia_train`, `eli5_train_q_index`.
    """
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Eagerly build all heavyweight globals once at import time (memoized by st.cache).
# NOTE(review): `load_indexes`/`load_models`/`load_train_data` do not resolve --
# the three loader functions above were all mangled to `UpperCamelCase__` -- and
# `a, b : str = ...` (annotated tuple target) is a SyntaxError; de-mangle first.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str =load_indexes()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] =load_models()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] =load_train_data()
def UpperCamelCase__ ( question ,n_results=10 ):
    """Return the `n_results` ELI5 training examples whose questions are nearest
    to `question` under the dense question encoder + FAISS index built above.
    """
    # fix: the mangled signature declared `lowerCAmelCase__` twice (a
    # SyntaxError) while the body referenced the upstream names; restored.
    # Embed the query (module-level qar_tokenizer / qar_model -- order per the
    # upstream eli5 demo; confirm after de-mangling).
    q_rep = embed_questions_for_retrieval([question] ,qar_tokenizer ,qar_model )
    # D = distances, I = row ids into the training set.
    D, I = eli5_train_q_index.search(q_rep ,n_results )
    # fix: the original indexed the dataset with the constant n_results argument
    # instead of the loop variable, returning the same example n_results times.
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def UpperCamelCase__ ( question ,source="wiki40b" ,method="dense" ,n_results=10 ):
    """Retrieve support passages for `question`.

    Returns (question_doc, support_list) where question_doc is the
    "question: ... context: ..." string fed to the generator and support_list
    holds (article_title, section_title, score, passage_text) tuples.
    """
    # fix: the mangled signature repeated `lowerCAmelCase__` four times (a
    # SyntaxError); parameter names restored from the upstream eli5 demo.
    if source == "none":
        # Dummy context of ten empty <P> separators when retrieval is disabled.
        support_doc, hit_lst = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            # NOTE(review): argument names follow the upstream demo; the
            # module-level globals are still mangled -- confirm after de-mangling.
            support_doc, hit_lst = query_qa_dense_index(
                question ,qar_model ,qar_tokenizer ,wikiaab_passages ,wikiaab_gpu_index_flat ,n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question ,es_client ,index_name="""english_wiki40b_snippets_100w""" ,n_results=n_results ,)
    support_list = [
        (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
    ]
    question_doc = """question: {} context: {}""".format(question ,support_doc )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _tensor : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _tokenizer : None),
    } )
def UpperCamelCase__ ( question_doc ,sas_model ,sas_tokenizer ,min_len=64 ,max_len=256 ,sampling=False ,n_beams=2 ,top_p=0.95 ,temp=0.8 ):
    """Generate a long-form answer for `question_doc` with the seq2seq model.

    Returns (answer, support_list).
    """
    # fix: the mangled signature repeated `lowerCAmelCase__` nine times (a
    # SyntaxError); parameter names restored from the upstream eli5 demo.
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc ,sas_model ,sas_tokenizer ,num_answers=1 ,num_beams=n_beams ,min_len=min_len ,max_len=max_len ,do_sample=sampling ,temp=temp ,top_p=top_p ,top_k=None ,max_input_length=1_024 ,device="""cuda:0""" ,)[0]
    # NOTE(review): `support_list` is a module-level name set by the main
    # script before this is called (an upstream quirk) -- TODO confirm.
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__SCREAMING_SNAKE_CASE : Union[str, Any] ='''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__SCREAMING_SNAKE_CASE : Tuple ='''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__SCREAMING_SNAKE_CASE : str ='''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
# Candidate actions shown in the sidebar selectbox.
# NOTE(review): `demo_options`, `action_list`, `action_st`, `show_type`,
# `retrieval_options`, `retriever_info` etc. were mangled away and no longer
# resolve; restore the real names before running.
__SCREAMING_SNAKE_CASE : Dict =[
    '''Answer the question''',
    '''View the retrieved document only''',
    '''View the most similar ELI5 question and answer''',
    '''Show me everything, please!''',
]
__SCREAMING_SNAKE_CASE : Optional[Any] =st.sidebar.checkbox('''Demo options''')
if demo_options:
    __SCREAMING_SNAKE_CASE : List[str] =st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    __SCREAMING_SNAKE_CASE : List[Any] =action_list.index(action_st)
    __SCREAMING_SNAKE_CASE : List[str] =st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    __SCREAMING_SNAKE_CASE : Union[str, Any] =show_type == '''Show full text of passages'''
else:
    # Defaults when the options panel is collapsed: show everything, full text.
    __SCREAMING_SNAKE_CASE : Optional[int] =3
    __SCREAMING_SNAKE_CASE : Tuple =True
__SCREAMING_SNAKE_CASE : Dict =st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    __SCREAMING_SNAKE_CASE : Optional[int] ='''
### Information retriever options

The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
    st.sidebar.markdown(retriever_info)
    __SCREAMING_SNAKE_CASE : int =st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    __SCREAMING_SNAKE_CASE : Dict =st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    __SCREAMING_SNAKE_CASE : List[str] ='''wiki40b'''
    __SCREAMING_SNAKE_CASE : int ='''dense'''
# Generation defaults (may be overridden by the "Generation options" panel below).
__SCREAMING_SNAKE_CASE : str ='''beam'''
__SCREAMING_SNAKE_CASE : List[Any] =2
__SCREAMING_SNAKE_CASE : Union[str, Any] =64
__SCREAMING_SNAKE_CASE : List[str] =256
__SCREAMING_SNAKE_CASE : Optional[int] =None
__SCREAMING_SNAKE_CASE : Tuple =None
# Generation options panel: decoding strategy and length/sampling parameters.
# NOTE(review): `generate_options`, `generate_info`, `sampled`, `questions_list`
# and `question_s` were mangled away and no longer resolve.
__SCREAMING_SNAKE_CASE : str =st.sidebar.checkbox('''Generation options''')
if generate_options:
    __SCREAMING_SNAKE_CASE : Dict ='''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
    st.sidebar.markdown(generate_info)
    __SCREAMING_SNAKE_CASE : Optional[Any] =st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    __SCREAMING_SNAKE_CASE : Optional[Any] =st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    __SCREAMING_SNAKE_CASE : Any =st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        __SCREAMING_SNAKE_CASE : int =st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        # Nucleus sampling parameters (beam size unused in this branch).
        __SCREAMING_SNAKE_CASE : Optional[int] =st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        __SCREAMING_SNAKE_CASE : Optional[int] =st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        __SCREAMING_SNAKE_CASE : Any =None
# start main text
__SCREAMING_SNAKE_CASE : Dict =[
    '''<MY QUESTION>''',
    '''How do people make chocolate?''',
    '''Why do we get a fever when we are sick?''',
    '''How can different animals perceive different colors?''',
    '''What is natural language processing?''',
    '''What\'s the best way to treat a sunburn?''',
    '''What exactly are vitamins ?''',
    '''How does nuclear energy provide electricity?''',
    '''What\'s the difference between viruses and bacteria?''',
    '''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
    '''Why do people like drinking coffee even though it tastes so bad?''',
    '''What happens when wine ages? How does it make the wine taste better?''',
    '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
    '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
    '''How does New Zealand have so many large bird predators?''',
]
__SCREAMING_SNAKE_CASE : str =st.selectbox(
    '''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    __SCREAMING_SNAKE_CASE : Optional[Any] =st.text_input('''Enter your question here:''', '''''')
else:
    __SCREAMING_SNAKE_CASE : Dict =question_s
# Main interaction: retrieve support, generate an answer, render sources.
# NOTE(review): `action`, `index_type`, `question`, `wiki_source`,
# `support_list_dense`, `support_list_sparse`, `support_list`, `question_doc`,
# `answer`, `wiki_url`, `sec_titles`, `sec_list`, `sections`, `show_passages`,
# `nn_train_list`, `train_exple`, `answers_st`, `disclaimer` and the two
# helper functions were mangled away and no longer resolve.
if st.button('''Show me!'''):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, de-duplicated, capped at 10.
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] =make_support(question, source=wiki_source, method='''dense''', n_results=10)
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] =make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            __SCREAMING_SNAKE_CASE : int =[]
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            __SCREAMING_SNAKE_CASE : Union[str, Any] =support_list[:10]
            __SCREAMING_SNAKE_CASE : str ='''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
        else:
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int =make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict =answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == '''sampled'''),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown('''### The model generated answer is:''')
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        # Render each supporting passage as an article/section markdown link.
        st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
        for i, res in enumerate(support_list):
            __SCREAMING_SNAKE_CASE : Tuple ='''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            __SCREAMING_SNAKE_CASE : Optional[int] =res[1].strip()
            if sec_titles == "":
                __SCREAMING_SNAKE_CASE : str ='''[{}]({})'''.format(res[0], wiki_url)
            else:
                __SCREAMING_SNAKE_CASE : Dict =sec_titles.split(''' & ''')
                __SCREAMING_SNAKE_CASE : int =''' & '''.join(
                    ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
                )
            st.markdown(
                '''{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}'''.format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
                )
    if action in [2, 3]:
        # Show the most similar ELI5 training question and its top answers.
        __SCREAMING_SNAKE_CASE : Tuple =find_nearest_training(question)
        __SCREAMING_SNAKE_CASE : Optional[Any] =nn_train_list[0]
        st.markdown(
            '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
        )
        __SCREAMING_SNAKE_CASE : List[str] =[
            '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
            for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
            if i == 0 or sc > 2
        ]
        st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__SCREAMING_SNAKE_CASE : Optional[Any] ='''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 72 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def UpperCamelCase__ ( check_program ,timeout ,task_id ,completion_id ):
    """Run `check_program` in a separate process with a hard `timeout`.

    Returns a dict with the task id, whether the program "passed", the raw
    result string, and the completion id.
    """
    # fix: the mangled signature declared the same parameter name four times
    # (a SyntaxError) while the body referenced the upstream names; restored
    # per OpenAI's human-eval execution.py.
    manager = multiprocessing.Manager()
    # Shared list so the child process can report its outcome.
    result = manager.list()
    # NOTE(review): upstream targets the sandboxed runner `unsafe_execute`,
    # which exists below under a mangled name -- restore that name too.
    p = multiprocessing.Process(target=unsafe_execute ,args=(check_program, result, timeout) )
    p.start()
    # Give the child one extra second beyond its own alarm before killing it.
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append("""timed out""" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def UpperCamelCase__ ( check_program ,result ,timeout ):
    """Execute untrusted `check_program` inside a throwaway temp directory.

    Appends "passed", "timed out" or "failed: ..." to the shared `result` list.
    Intended to run only as a disposable subprocess (see check_correctness).
    """
    # fix: the mangled signature declared the same parameter name three times
    # (a SyntaxError) while the body referenced the upstream names; restored.
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    # SECURITY: exec of externally supplied code -- only safe
                    # because reliability_guard() + the subprocess sandbox run first.
                    exec(check_program ,exec_globals )
            result.append("""passed""" )
        except TimeoutException:
            result.append("""timed out""" )
        except BaseException as e:
            result.append(f"""failed: {e}""" )
        # fix: restore the os/shutil functions disabled by reliability_guard so
        # TemporaryDirectory can clean up (the mangled version assigned them to
        # throwaway locals instead).
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def UpperCamelCase__ ( lowerCAmelCase__ ):
    """Raise TimeoutException if the with-block runs longer than the given
    number of seconds (POSIX only: uses SIGALRM + an interval timer).
    """
    def signal_handler(signum ,frame ):
        # fix: the mangled handler declared the same parameter name twice,
        # which is a SyntaxError; restored the conventional (signum, frame).
        raise TimeoutException("""Timed out!""" )
    signal.setitimer(signal.ITIMER_REAL ,lowerCAmelCase__ )
    # fix: the original registered the timeout *value* as the SIGALRM handler
    # instead of the handler function, so the alarm would have crashed.
    signal.signal(signal.SIGALRM ,signal_handler )
    try:
        yield
    finally:
        # Always disarm the timer, even if the body raised.
        signal.setitimer(signal.ITIMER_REAL ,0 )
@contextlib.contextmanager
def UpperCamelCase__ ( ):
    """Silence stdout and stderr and block stdin for the duration of the block.

    Both output streams are redirected into a write-only buffer, and stdin is
    replaced by the same buffer (which raises OSError on any read).
    """
    sink = WriteOnlyStringIO()
    with contextlib.redirect_stdout(sink ):
        with contextlib.redirect_stderr(sink ):
            with redirect_stdin(sink ):
                yield
@contextlib.contextmanager
def UpperCamelCase__ ( ):
    """Create a temporary directory, chdir into it, and yield its path.

    The directory (and cwd) are restored/removed when the block exits.
    """
    with tempfile.TemporaryDirectory() as dirname:
        # fix: the original called chdir() with an undefined name instead of the
        # freshly created directory. `chdir` is the cwd-switching context
        # manager defined later in this file (its name is mangled there).
        with chdir(dirname ):
            yield dirname
class A_ ( Exception ):
    """Raised by the time_limit context manager when its watchdog timer fires.

    fix: the mangled base-class name `__a` did not resolve; upstream derives
    this exception directly from Exception.
    """
    pass
class A_ ( io.StringIO ):
    """A StringIO that can be written to but raises OSError on any read.

    fix: the four overrides were all mangled to the same meaningless method
    name (so none of them shadowed the StringIO read API) and each declared
    *args/**kwargs under one duplicated name, which is a SyntaxError. The
    upstream names read/readline/readlines/readable are restored.
    """

    def read( self , *args , **kwargs ):
        raise OSError

    def readline( self , *args , **kwargs ):
        raise OSError

    def readlines( self , *args , **kwargs ):
        raise OSError

    def readable( self , *args , **kwargs ):
        # Declare the stream unreadable so io machinery fails fast.
        return False
class A_ ( contextlib._RedirectStream ):  # type: ignore
    """Context manager that redirects sys.stdin to a supplied stream
    (the read-side complement of contextlib.redirect_stdout).

    fix: contextlib._RedirectStream dispatches on a class attribute named
    `_stream`; the mangled `_A :List[Any]` attribute (whose annotation also
    referenced the undefined names List/Any) broke the redirection entirely.
    """
    _stream = """stdin"""
@contextlib.contextmanager
def UpperCamelCase__ ( root ):
    """Temporarily change the working directory to `root` (no-op for ".").

    fix: the parameter was mangled to `lowerCAmelCase__` while the body still
    referenced the upstream name `root`, so every call raised NameError; the
    upstream parameter name is restored (positionally compatible).
    """
    if root == ".":
        # Already there: nothing to change, nothing to restore.
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        # Always return to the original directory.
        os.chdir(cwd )
def UpperCamelCase__ ( maximum_memory_bytes=None ):
    """Disable destructive interpreter functionality before exec-ing untrusted code.

    This prevents the generated program from interfering with the test (fork
    bombs, killing processes, deleting files, ...). It is NOT a security
    sandbox: the caller must itself run in a disposable subprocess.

    fix: the mangled version assigned every disabled API to a throwaway local
    (`lowercase = None`), leaving all the dangerous functions enabled, and its
    signature no longer bound `maximum_memory_bytes`, which the body reads.
    The upstream (OpenAI human-eval execution.py) assignments are restored.
    """
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS ,(maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA ,(maximum_memory_bytes, maximum_memory_bytes) )
        # RLIMIT_STACK cannot be lowered this way on macOS.
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK ,(maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 72 | 1 |
import baseaa
def lowercase ( a ):
    """UTF-8 encode the text `a` and return its Ascii85-style encoding as bytes."""
    # fix: the body referenced the upstream parameter name `string`, which the
    # mangled signature (param `a`) no longer bound -> NameError on every call.
    # NOTE(review): `baseaa.aaaencode` looks like a mangled `base64.a85encode`
    # -- confirm against the module actually importable as `baseaa`.
    return baseaa.aaaencode(a.encode("utf-8" ) )
def lowercase ( a ):
    """Decode the Ascii85-style payload `a` and return it as UTF-8 text."""
    decoded_bytes = baseaa.aaadecode(a )
    return decoded_bytes.decode("utf-8" )
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 631 |
from timeit import timeit
def lowercase ( a ):
    """Return the number of set bits in the non-negative int `a`.

    Uses Brian Kernighan's algorithm: ``n &= n - 1`` clears the lowest set
    bit, so the loop runs once per set bit.

    Raises:
        ValueError: if `a` is negative.
    """
    # fix: the body referenced the upstream name `number`, which the mangled
    # signature (param `a`) no longer bound -> NameError on every call.
    if a < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while a:
        a &= a - 1
        result += 1
    return result
def lowercase ( a ):
    """Return the number of set bits in the non-negative int `a`.

    Tests the lowest bit with ``% 2`` and shifts right until zero, so the loop
    runs once per bit position up to the highest set bit.

    Raises:
        ValueError: if `a` is negative.
    """
    # fix: the body referenced the upstream name `number`, which the mangled
    # signature (param `a`) no longer bound -> NameError on every call.
    if a < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while a:
        if a % 2 == 1:
            result += 1
        a >>= 1
    return result
def lowercase ( ):
    """Benchmark the two popcount implementations with timeit for a few inputs."""
    # NOTE(review): this function is broken by name mangling and cannot run
    # as-is: `get_set_bits_count_using_modulo_operator` and
    # `get_set_bits_count_using_brian_kernighans_algorithm` no longer exist
    # (both were renamed to `lowercase`), the inner function's `a`/`number`
    # names are swapped relative to its signature, and the timeit snippets
    # import `__main__` and expect the original attribute names there.
    # De-mangle the module before relying on these numbers.
    def do_benchmark(a ) -> None:
        SCREAMING_SNAKE_CASE_ :List[Any] = "import __main__ as z"
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(a ) = }" )
        SCREAMING_SNAKE_CASE_ :List[Any] = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=a )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(a ) = }" )
        SCREAMING_SNAKE_CASE_ :str = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=a , )
        print(F"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        do_benchmark(a )
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `benchmark` does not resolve -- the benchmark function was
    # mangled to `lowercase` above; restore the name before running as a script.
    benchmark()
| 631 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffold for the RemBERT model family (standard transformers
# __init__.py pattern): optional backends register their symbols only when the
# corresponding dependency is installed.
# NOTE(review): name mangling broke this module -- every assignment targets
# `snake_case_`, so `_import_structure` (referenced by _LazyModule below) is
# never populated and each backend list overwrites the previous one.
snake_case_ : List[str] = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}

# sentencepiece-backed slow tokenizer.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : int = ['RemBertTokenizer']

# tokenizers-backed fast tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : Tuple = ['RemBertTokenizerFast']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : Any = [
        'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RemBertForCausalLM',
        'RemBertForMaskedLM',
        'RemBertForMultipleChoice',
        'RemBertForQuestionAnswering',
        'RemBertForSequenceClassification',
        'RemBertForTokenClassification',
        'RemBertLayer',
        'RemBertModel',
        'RemBertPreTrainedModel',
        'load_tf_weights_in_rembert',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : List[Any] = [
        'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRemBertForCausalLM',
        'TFRemBertForMaskedLM',
        'TFRemBertForMultipleChoice',
        'TFRemBertForQuestionAnswering',
        'TFRemBertForSequenceClassification',
        'TFRemBertForTokenClassification',
        'TFRemBertLayer',
        'TFRemBertModel',
        'TFRemBertPreTrainedModel',
    ]

# Static imports for type checkers only; at runtime the _LazyModule below
# resolves attributes on first access instead.
if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): `_import_structure` is undefined here after mangling.
    snake_case_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set("123456789" )
def __lowerCamelCase ( ) -> int | None:
"""simple docstring"""
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
SCREAMING_SNAKE_CASE_ : int = 1_0_0_0_0_2 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
SCREAMING_SNAKE_CASE_ : List[str] = 1_0_0_2_0_0_3 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
    # NOTE(review): `solution` does not resolve -- the function above was
    # mangled to `__lowerCamelCase`; restore the name before running as a script.
    print(F'''{solution() = }''')
| 68 | 0 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
    '''Helper that builds a small LiltConfig plus matching dummy inputs/labels
    for the Lilt unit tests (mirrors the standard transformers ModelTester).'''

    # NOTE(review): name mangling broke this class -- every __init__ parameter
    # is named `lowercase__` (duplicate parameter names are a SyntaxError), the
    # body references the original attribute-source names (`parent`,
    # `batch_size`, ...) that are no longer bound, and the parenthesized
    # annotated tuple unpack in prepare_config_and_inputs_common below is
    # itself invalid syntax. De-mangle before running.
    def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=24 , lowercase__=2 , lowercase__=6 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=None , lowercase__=1_000 , ) -> str:
        SCREAMING_SNAKE_CASE : List[str] = parent
        SCREAMING_SNAKE_CASE : Any = batch_size
        SCREAMING_SNAKE_CASE : List[str] = seq_length
        SCREAMING_SNAKE_CASE : List[str] = is_training
        SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
        SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
        SCREAMING_SNAKE_CASE : Dict = use_labels
        SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
        SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
        SCREAMING_SNAKE_CASE : int = num_hidden_layers
        SCREAMING_SNAKE_CASE : Any = num_attention_heads
        SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
        SCREAMING_SNAKE_CASE : List[str] = hidden_act
        SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE : str = type_vocab_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
        SCREAMING_SNAKE_CASE : Dict = initializer_range
        SCREAMING_SNAKE_CASE : int = num_labels
        SCREAMING_SNAKE_CASE : Tuple = scope
        SCREAMING_SNAKE_CASE : Dict = range_bbox

    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Build random ids plus random bounding boxes; normalize each box so
        # x0 <= x1 and y0 <= y1 (LayoutLM-style "legal" bbox requirement).
        SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 3]
                    SCREAMING_SNAKE_CASE : Union[str, Any] = bbox[i, j, 1]
                    SCREAMING_SNAKE_CASE : Optional[int] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    SCREAMING_SNAKE_CASE : List[Any] = bbox[i, j, 2]
                    SCREAMING_SNAKE_CASE : int = bbox[i, j, 0]
                    SCREAMING_SNAKE_CASE : Optional[Any] = t
        SCREAMING_SNAKE_CASE : Tuple = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        SCREAMING_SNAKE_CASE : Dict = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE : str = None
        SCREAMING_SNAKE_CASE : List[str] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Assemble the LiltConfig from the tester's hyper-parameters.
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> int:
        # Checks base-model forward with/without mask and token_type_ids.
        SCREAMING_SNAKE_CASE : List[str] = LiltModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : Optional[int] = model(lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
        SCREAMING_SNAKE_CASE : Any = model(lowercase__ , bbox=lowercase__ , token_type_ids=lowercase__ )
        SCREAMING_SNAKE_CASE : Dict = model(lowercase__ , bbox=lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> str:
        # Checks the token-classification head output shape.
        SCREAMING_SNAKE_CASE : Tuple = self.num_labels
        SCREAMING_SNAKE_CASE : Any = LiltForTokenClassification(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : Any = model(
            lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Optional[int]:
        # Checks the question-answering head start/end logit shapes.
        SCREAMING_SNAKE_CASE : str = LiltForQuestionAnswering(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : str = model(
            lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _UpperCamelCase ( self ) -> Any:
        # Repackage prepare_config_and_inputs() output as (config, inputs_dict).
        SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) ,
        ) : str = config_and_inputs
        SCREAMING_SNAKE_CASE : Dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Unit tests for the Lilt model family (model, token-classification and
    question-answering heads) driven by the tester class above.'''

    # NOTE(review): the three mixin base classes were mangled to
    # `_SCREAMING_SNAKE_CASE` (an undefined name) -- presumably
    # ModelTesterMixin / GenerationTesterMixin / PipelineTesterMixin per the
    # imports at the top of this chunk; confirm and restore.
    snake_case__ : List[Any] = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : Any = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case__ : Union[str, Any] = False
    snake_case__ : List[Any] = False

    def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
        # NOTE(review): the mangled signature repeats `lowercase__` five times,
        # which is a SyntaxError; upstream this is is_pipeline_test_to_skip.
        return True

    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE : Union[str, Any] = LiltModelTester(self )
        SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )

    def _UpperCamelCase ( self ) -> List[Any]:
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )

    def _UpperCamelCase ( self ) -> int:
        # Exercise each position-embedding variant supported by the config.
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE : int = type
            self.model_tester.create_and_check_model(*lowercase__ )

    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase__ )

    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase__ )

    @slow
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Smoke-test loading the first published checkpoint.
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : Dict = LiltModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
@require_torch
@slow
class UpperCAmelCase ( unittest.TestCase ):
    '''Integration test: forward pass of the published lilt-roberta-en-base
    checkpoint against pre-computed expected hidden-state values.'''

    def _UpperCamelCase ( self ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE : Dict = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase__ )
        SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[1, 2]] , device=lowercase__ )
        SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase__ )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : Tuple = model(input_ids=lowercase__ , bbox=lowercase__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size([1, 2, 768] )
        SCREAMING_SNAKE_CASE : Any = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowercase__ , )
        self.assertTrue(outputs.last_hidden_state.shape , lowercase__ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase__ , atol=1E-3 ) )
| 251 | '''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase ( unittest.TestCase ):
    '''Pipeline tests for the audio-classification task (class continues past
    this chunk; the remaining methods are not visible here).'''

    # NOTE(review): name mangling broke these methods -- parameters repeat
    # `lowercase__` (a SyntaxError) and locals like `audio_classifier`,
    # `audioa`, `audio`, `examples` and `dataset` are no longer bound
    # (assignments target SCREAMING_SNAKE_CASE).
    snake_case__ : Tuple = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    snake_case__ : Optional[int] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> Any:
        # Build the pipeline under test plus two silent dummy waveforms.
        SCREAMING_SNAKE_CASE : List[Any] = AudioClassificationPipeline(model=lowercase__ , feature_extractor=lowercase__ )

        # test with a raw waveform
        SCREAMING_SNAKE_CASE : Optional[int] = np.zeros((34_000,) )
        SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros((14_000,) )
        return audio_classifier, [audioa, audio]

    def _UpperCamelCase ( self , lowercase__ , lowercase__ ) -> Tuple:
        # A fresh model defaults to num_labels=2, hence two {score,label} dicts.
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = examples
        SCREAMING_SNAKE_CASE : Optional[Any] = audio_classifier(lowercase__ )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            lowercase__ , [
                {'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
                {'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
            ] , )
        SCREAMING_SNAKE_CASE : Optional[Any] = audio_classifier(lowercase__ , top_k=1 )
        self.assertEqual(
            lowercase__ , [
                {'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
            ] , )
        self.run_torchaudio(lowercase__ )

    @require_torchaudio
    def _UpperCamelCase ( self , lowercase__ ) -> Dict:
        import datasets

        # test with a local file
        SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        SCREAMING_SNAKE_CASE : int = dataset[0]['audio']['array']
        SCREAMING_SNAKE_CASE : List[str] = audio_classifier(lowercase__ )
        self.assertEqual(
            lowercase__ , [
                {'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
                {'score': ANY(lowercase__ ), 'label': ANY(lowercase__ )},
            ] , )
@require_torch
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : List[Any] = 'anton-l/wav2vec2-random-tiny-classifier'
SCREAMING_SNAKE_CASE : Tuple = pipeline('audio-classification' , model=lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = np.ones((8_000,) )
SCREAMING_SNAKE_CASE : Tuple = audio_classifier(lowercase__ , top_k=4 )
SCREAMING_SNAKE_CASE : Dict = [
{'score': 0.0_8_4_2, 'label': 'no'},
{'score': 0.0_8_3_8, 'label': 'up'},
{'score': 0.0_8_3_7, 'label': 'go'},
{'score': 0.0_8_3_4, 'label': 'right'},
]
SCREAMING_SNAKE_CASE : Tuple = [
{'score': 0.0_8_4_5, 'label': 'stop'},
{'score': 0.0_8_4_4, 'label': 'on'},
{'score': 0.0_8_4_1, 'label': 'right'},
{'score': 0.0_8_3_4, 'label': 'left'},
]
self.assertIn(nested_simplify(lowercase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
SCREAMING_SNAKE_CASE : Tuple = {'array': np.ones((8_000,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
SCREAMING_SNAKE_CASE : List[Any] = audio_classifier(lowercase__ , top_k=4 )
self.assertIn(nested_simplify(lowercase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _UpperCamelCase ( self ) -> Union[str, Any]:
import datasets
SCREAMING_SNAKE_CASE : List[str] = 'superb/wav2vec2-base-superb-ks'
SCREAMING_SNAKE_CASE : Optional[int] = pipeline('audio-classification' , model=lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test' )
SCREAMING_SNAKE_CASE : Tuple = np.array(dataset[3]['speech'] , dtype=np.floataa )
SCREAMING_SNAKE_CASE : str = audio_classifier(lowercase__ , top_k=4 )
self.assertEqual(
nested_simplify(lowercase__ , decimals=3 ) , [
{'score': 0.9_8_1, 'label': 'go'},
{'score': 0.0_0_7, 'label': 'up'},
{'score': 0.0_0_6, 'label': '_unknown_'},
{'score': 0.0_0_1, 'label': 'down'},
] , )
@require_tf
@unittest.skip('Audio classification is not implemented for TF' )
def _UpperCamelCase ( self ) -> str:
pass
| 251 | 1 |
'''simple docstring'''
import os
UpperCAmelCase_ : Union[str, Any] = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
def _lowercase ( UpperCamelCase__ : str ):
__A : str = 0
__A : Optional[int] = 0
while index < len(UpperCamelCase__ ) - 1:
__A : Any = SYMBOLS[numerals[index]]
__A : Optional[Any] = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _lowercase ( UpperCamelCase__ : int ):
__A : Any = ''
__A : Dict = num // 1000
numerals += m_count * "M"
num %= 1000
__A : Optional[Any] = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
__A : Dict = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def _lowercase ( UpperCamelCase__ : str = "/p089_roman.txt" ):
__A : Optional[Any] = 0
with open(os.path.dirname(UpperCamelCase__ ) + roman_numerals_filename ) as filea:
__A : str = filea.readlines()
for line in lines:
__A : List[Any] = line.strip()
__A : int = parse_roman_numerals(UpperCamelCase__ )
__A : Dict = generate_roman_numerals(UpperCamelCase__ )
savings += len(UpperCamelCase__ ) - len(UpperCamelCase__ )
return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
| 540 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCAmelCase_ : int = ''
UpperCAmelCase_ : Union[str, Any] = ''
UpperCAmelCase_ : Any = ''
UpperCAmelCase_ : int = 1 # (0 is vertical, 1 is horizontal)
def _lowercase ( ):
__A ,__A : Optional[int] = get_dataset(UpperCamelCase__, UpperCamelCase__ )
print('Processing...' )
__A ,__A ,__A : Any = update_image_and_anno(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__A : Dict = random_chars(32 )
__A : List[Any] = paths[index].split(os.sep )[-1].rsplit('.', 1 )[0]
__A : Dict = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""", UpperCamelCase__, [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__A : Tuple = []
for anno in new_annos[index]:
__A : Any = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
with open(f"""/{file_root}.txt""", 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _lowercase ( UpperCamelCase__ : str, UpperCamelCase__ : str ):
__A : Tuple = []
__A : int = []
for label_file in glob.glob(os.path.join(UpperCamelCase__, '*.txt' ) ):
__A : Optional[int] = label_file.split(os.sep )[-1].rsplit('.', 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__A : Optional[Any] = in_file.readlines()
__A : int = os.path.join(UpperCamelCase__, f"""{label_name}.jpg""" )
__A : Optional[int] = []
for obj_list in obj_lists:
__A : str = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def _lowercase ( UpperCamelCase__ : list, UpperCamelCase__ : list, UpperCamelCase__ : int = 1 ):
__A : int = []
__A : Optional[Any] = []
__A : str = []
for idx in range(len(UpperCamelCase__ ) ):
__A : List[Any] = []
__A : List[str] = img_list[idx]
path_list.append(UpperCamelCase__ )
__A : Optional[Any] = anno_list[idx]
__A : Union[str, Any] = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__A : int = cva.flip(UpperCamelCase__, UpperCamelCase__ )
for bbox in img_annos:
__A : Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__A : Tuple = cva.flip(UpperCamelCase__, UpperCamelCase__ )
for bbox in img_annos:
__A : Dict = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
def _lowercase ( UpperCamelCase__ : int = 32 ):
assert number_char > 1, "The number of character should greater than 1"
__A : int = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 540 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Any:
"""simple docstring"""
# Initialise PyTorch model
UpperCamelCase = BertConfig.from_json_file(UpperCAmelCase_ )
print(F"Building PyTorch model from configuration: {config}" )
UpperCamelCase = BertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 554 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __a ( _lowerCAmelCase ):
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any )-> None:
"""simple docstring"""
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 554 | 1 |
import requests
from bsa import BeautifulSoup
def lowerCamelCase__ ( _a = "AAPL"):
SCREAMING_SNAKE_CASE : Any = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
SCREAMING_SNAKE_CASE : Any = BeautifulSoup(requests.get(_a).text , "html.parser")
SCREAMING_SNAKE_CASE : Union[str, Any] = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''') | 193 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , "embed_dim" ) )
self.parent.assertTrue(hasattr(a , "num_heads" ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , a : int , a : List[str]=13 , a : Optional[int]=64 , a : List[str]=3 , a : Dict=[16, 48, 96] , a : List[Any]=[1, 3, 6] , a : int=[1, 2, 10] , a : List[str]=[7, 3, 3] , a : List[Any]=[4, 2, 2] , a : Tuple=[2, 1, 1] , a : Union[str, Any]=[2, 2, 2] , a : List[str]=[False, False, True] , a : int=[0.0, 0.0, 0.0] , a : Dict=0.02 , a : List[Any]=1e-12 , a : Tuple=True , a : Optional[int]=True , a : Union[str, Any]=2 , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : Dict = patch_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = patch_stride
SCREAMING_SNAKE_CASE : str = patch_padding
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : List[str] = num_labels
SCREAMING_SNAKE_CASE : List[Any] = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = stride_kv
SCREAMING_SNAKE_CASE : Tuple = depth
SCREAMING_SNAKE_CASE : List[Any] = cls_token
SCREAMING_SNAKE_CASE : List[Any] = attention_drop_rate
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
# create a random int32 tensor of given shape
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Tuple , a : str , a : List[str] , a : str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = TFCvtModel(config=a )
SCREAMING_SNAKE_CASE : Any = model(a , training=a )
SCREAMING_SNAKE_CASE : List[Any] = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
SCREAMING_SNAKE_CASE : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = TFCvtForImageClassification(a )
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , labels=a , training=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ =(
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = TFCvtModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = TFCvtConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(a )
SCREAMING_SNAKE_CASE : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
def check_hidden_states_output(a : List[str] , a : Union[str, Any] , a : List[str] ):
SCREAMING_SNAKE_CASE : Tuple = model_class(a )
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE : int = len(self.model_tester.depth )
self.assertEqual(len(a ) , a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(a , a , a )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = TFCvtModel.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=a , return_tensors="tf" )
# forward pass
SCREAMING_SNAKE_CASE : List[str] = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE : Tuple = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , a , atol=1e-4 ) ) | 193 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.