#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via NCCL and allocate GPU memory.
#
# To run it, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you see hangs in the `barrier` calls you have network issues; you may try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*args) -> None:
    """Print under an exclusive flock so that output from concurrent processes doesn't interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*args)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(f"{gpu} is broken")
raise
"""Tests for the BioGPT tokenizer."""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
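# For context, a minimal sketch (an addition, not part of the test file) of how
# greedy BPE merges like "l o" -> "lo" produce the tokens checked above; the
# trailing numbers in the merge fixtures are ignored, since only the first two
# fields of each rule matter here.
def apply_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # "</w>" marks the word end
    ranks = {tuple(m.split()[:2]): i for i, m in enumerate(merges) if m}
    while len(symbols) > 1:
        pairs = list(zip(symbols, symbols[1:]))
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:  # no applicable merge rule left
            break
        i = pairs.index(best)
        symbols = symbols[:i] + ["".join(best)] + symbols[i + 2 :]
    return symbols

# apply_bpe("lower", ["l o 123", "lo w 1456", "e r</w> 1789"]) -> ["low", "er</w>"]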
"""Convert an Excel-style column title (e.g. "AB") to the corresponding column number."""
def excel_title_to_column(column_title: str) -> int:
    """
    Given a column title as it appears in an Excel sheet, return its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZY")
    701
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
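# A sketch of the inverse conversion (an addition, not part of the original
# file): Excel columns form a bijective base-26 numeral system, hence the
# `n - 1` before each divmod.
def excel_column_to_title(column_number: int) -> str:
    title = []
    n = column_number
    while n > 0:
        n, remainder = divmod(n - 1, 26)
        title.append(chr(65 + remainder))
    return "".join(reversed(title))  # e.g. excel_column_to_title(28) -> "AB"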
if __name__ == "__main__":
from doctest import testmod
testmod()
"""ReLU (rectified linear unit) activation function implemented with NumPy."""
from __future__ import annotations
import numpy as np
def relu(vector: np.ndarray) -> np.ndarray:
    """Apply the rectified linear unit, max(0, x), element-wise."""
    return np.maximum(0, vector)
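# Companion sketch (an addition, not in the original file): the ReLU
# subgradient used in backpropagation -- 1 for positive inputs, else 0.
def relu_derivative(vector: np.ndarray) -> np.ndarray:
    return (np.asarray(vector) > 0).astype(float)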
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
"""Print a diamond pattern of stars: an upper pyramid followed by its mirror image."""
def floyd(n: int) -> None:
    """Print the upper half of the diamond (pyramid)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond (inverted pyramid)."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a placeholder message for non-positive input."""
    if n <= 0:
        print("        ...        ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
K = 1
while K:
    user_number = int(input("enter a number and see the magic : "))
    print()
    pretty_print(user_number)
    K = int(input("press 0 to exit... and 1 to continue..."))
print('Good Bye...')
"""Tests for the Swinv2 (Swin Transformer V2) model."""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
A = True
A = False
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
A = len(self.model_tester.depths )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = config.window_size**2
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
A = len(_lowerCAmelCase )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
A = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
A = 2
self.assertEqual(out_len + added_hidden_states , len(_lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.hidden_states
A = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swinv2 has a different seq_length
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
A = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
A , A , A , A = reshaped_hidden_states[0].shape
A = (
reshaped_hidden_states[0].view(_lowerCAmelCase , _lowerCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    def test_hidden_states_output_with_padding(self):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""Project Euler problem 11: greatest product of four adjacent numbers in a 20x20 grid."""
import os
def solution() -> int:
    """Return the greatest product of four adjacent grid numbers in any direction."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
"""Jaro-Winkler string similarity, https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance"""
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Compute the Jaro-Winkler similarity from three ingredients:
    matching characters, transpositions, and a common prefix of up to 4 characters.
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
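# Worked check (an addition, not in the original file): "hello" vs "world"
# share one matched character ("l"), with no transpositions and no common
# prefix, giving jaro = (1/3) * (1/5 + 1/5 + 1/1) and a final score of ~0.4667.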
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
"""Tests for the TimmBackbone wrapper."""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_using_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
    def test_retain_grad_hidden_states_attentions(self):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
A = self.has_attentions
# no need to test all models as different heads yield the same functionality
A = self.all_model_classes[0]
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
A = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
A = model(**_lowerCAmelCase )
A = outputs[0][-1]
# Encoder-/Decoder-only models
A = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
A = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
    def test_create_from_modified_config(self):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(**_lowerCAmelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
A = copy.deepcopy(_lowerCAmelCase )
A = None
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(**_lowerCAmelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
A = copy.deepcopy(_lowerCAmelCase )
A = False
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(**_lowerCAmelCase )
"""CUAD metric: wraps the official scoring script for CUAD v1."""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
"""Tokenization classes for ESM."""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False):
        return super()._add_tokens(new_tokens, special_tokens=True)
"""Pandas-based dataset builder that reads pickled DataFrames."""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
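# Hypothetical usage sketch (an addition, not part of the module): with this
# builder available as the packaged "pandas" loader, pickled DataFrames can be
# turned into a dataset directly, e.g.:
#   from datasets import load_dataset
#   dset = load_dataset("pandas", data_files={"train": "data/train.pkl"})["train"]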
"""BLEU regression tests for FSMT translation models."""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
"""Check whether a string can be rearranged to form a palindrome."""
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Determine, via collections.Counter, whether the string can be rearranged into a palindrome."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Determine, by counting character frequencies, whether the string can be rearranged into a palindrome."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations above on the given string."""
    print("\nFor string = ", input_str, ":")
    print("> can_string_be_rearranged_as_palindrome_counter()", "\tans =", can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =", timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"), "seconds")
    print("> can_string_be_rearranged_as_palindrome()", "\tans =", can_string_be_rearranged_as_palindrome(input_str), "\ttime =", timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"), "seconds")
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
"""Lazy import structure for the UniSpeech model."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
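# Note (an addition, not in the original file): _LazyModule defers the heavy
# torch imports declared above until an attribute such as `UniSpeechModel` is
# first accessed, which keeps `import transformers` fast.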
"""EnCodec model configuration."""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
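# Worked example (an addition, not in the original file): with the default
# upsampling_ratios [8, 5, 4, 2] the hop length is 8 * 5 * 4 * 2 = 320 samples,
# so at a 24 kHz sampling rate frame_rate = ceil(24000 / 320) = 75 frames/s.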
"""Evaluate a polynomial directly and with Horner's method."""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial directly, summing c_i * x**i over all coefficients."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with Horner's method, using only n multiplications."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
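    # Agreement check (an addition, not in the original file): both evaluation
    # orders give the same value up to floating-point error; here
    # 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9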
"""Lazy import structure for the Audio Spectrogram Transformer (AST)."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Binary insertion sort."""
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in ascending order, finding each insertion point by binary search.

    >>> binary_insertion_sort([0, 4, 1234, 4, 1])
    [0, 1, 4, 4, 1234]
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
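# Property check (an addition, not in the original file): the result matches
# Python's built-in sorted(); e.g. binary_insertion_sort([5, 2, 4, 6, 1, 3])
# returns [1, 2, 3, 4, 5, 6].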
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
"""Tests for the Bark processor."""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path)
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)")
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path)
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
"""DeeRoBERTa: RoBERTa with early-exit "highway" classifiers (DeeBERT)."""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds)

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 258 |
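An illustrative aside on the early-exit mechanism above: each "highway" classifier decides per example whether it is confident enough to stop, typically by thresholding the entropy of its output distribution. A minimal, self-contained sketch of that check (function names and threshold are assumptions, not taken from the snippet):
import torch
import torch.nn.functional as F
def prediction_entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution, computed per example
    log_probs = F.log_softmax(logits, dim=-1)
    return -(log_probs.exp() * log_probs).sum(dim=-1)
def should_exit(logits: torch.Tensor, threshold: float = 0.1) -> torch.Tensor:
    # Exit early for examples whose predictive entropy falls below the threshold
    return prediction_entropy(logits) < threshold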
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
RADIUS = 637_8137
def __a ( lat_1 , lon_1 , lat_2 , lon_2 ) ->float:
    """simple docstring"""
    # Reduce the geodetic latitudes to parametric latitudes to correct for the Earth's flattening
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat_1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat_2 ) ) )
    lambda_1 = radians(lon_1 )
    lambda_2 = radians(lon_2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258 | 1 |
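For contrast with the flattening-corrected formula above, a plain spherical haversine is often sufficient; a minimal sketch (mean Earth radius assumed):
from math import asin, cos, radians, sin, sqrt
def spherical_haversine(lat_1, lon_1, lat_2, lon_2, radius=6_371_000.0):
    # Classic haversine on a sphere; ignores the Earth's flattening
    phi_1, phi_2 = radians(lat_1), radians(lat_2)
    d_phi = radians(lat_2 - lat_1)
    d_lambda = radians(lon_2 - lon_1)
    h = sin(d_phi / 2) ** 2 + cos(phi_1) * cos(phi_2) * sin(d_lambda / 2) ** 2
    return 2 * radius * asin(sqrt(h))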
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = (CMStochasticIterativeScheduler,)
__lowerCAmelCase = 10
def A (self : int , **_lowerCAmelCase : Union[str, Any] ):
A = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**_lowerCAmelCase )
return config
def A (self : Tuple ):
A = 10
A = self.get_scheduler_config()
A = self.scheduler_classes[0](**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
A = scheduler.timesteps[0]
A = scheduler.timesteps[1]
A = self.dummy_sample
A = 0.1 * sample
A = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
A = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A (self : Any ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def A (self : Any ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_lowerCAmelCase )
def A (self : Optional[Any] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**_lowerCAmelCase )
A = 1
scheduler.set_timesteps(_lowerCAmelCase )
A = scheduler.timesteps
A = torch.manual_seed(0 )
A = self.dummy_model()
A = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_lowerCAmelCase ):
# 1. scale model input
A = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict noise residual
A = model(_lowerCAmelCase , _lowerCAmelCase )
# 3. predict previous sample x_t-1
A = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
A = pred_prev_sample
A = torch.sum(torch.abs(_lowerCAmelCase ) )
A = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def A (self : Tuple ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**_lowerCAmelCase )
A = [106, 0]
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
A = scheduler.timesteps
A = torch.manual_seed(0 )
A = self.dummy_model()
A = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict noise residual
A = model(_lowerCAmelCase , _lowerCAmelCase )
# 3. predict previous sample x_t-1
A = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
A = pred_prev_sample
A = torch.sum(torch.abs(_lowerCAmelCase ) )
A = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def A (self : List[str] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**_lowerCAmelCase )
A = [39, 30, 12, 15, 0]
with self.assertRaises(_lowerCAmelCase , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
def A (self : Tuple ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**_lowerCAmelCase )
A = [39, 30, 12, 1, 0]
A = len(_lowerCAmelCase )
with self.assertRaises(_lowerCAmelCase , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase )
def A (self : List[str] ):
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**_lowerCAmelCase )
A = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _lowerCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
| 258 |
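The scheduler tests above all exercise the same three-step sampling contract; a minimal sketch of that loop with stand-in model and scheduler objects (this shows the shape of the API, not a complete diffusers example):
import torch
def sample(model, scheduler, sample_shape, num_inference_steps, seed=0):
    generator = torch.manual_seed(seed)
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(sample_shape, generator=generator) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)  # 1. scale model input
        noise_pred = model(model_input, t)                    # 2. predict noise residual
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample  # 3. step
    return sample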
'''simple docstring'''
import os
import sys
import unittest
_lowerCamelCase : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCamelCase : str = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
_lowerCamelCase : Tuple = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Any ):
A = get_test_to_tester_mapping(_lowerCAmelCase )
A = get_test_to_tester_mapping(_lowerCAmelCase )
A = {"""BertModelTest""": """BertModelTester"""}
A = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def A (self : str ):
A = get_model_to_test_mapping(_lowerCAmelCase )
A = get_model_to_test_mapping(_lowerCAmelCase )
A = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
A = get_model_to_tester_mapping(_lowerCAmelCase )
A = get_model_to_tester_mapping(_lowerCAmelCase )
A = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
| 258 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __UpperCAmelCase ( unittest.TestCase , A__ ):
'''simple docstring'''
def A (self : Dict ):
A = load_tool("""text-to-speech""" )
self.tool.setup()
def A (self : Union[str, Any] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
A = self.tool("""hey""" )
A = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def A (self : Optional[int] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
A = self.tool("""hey""" )
A = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 258 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort( collection , n ) ->None:
    """simple docstring"""
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next( collection , index ) ->None:
    """simple docstring"""
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 258 | 1 |
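The recursive variant above makes one call per element and can hit Python's default recursion limit on long inputs; an equivalent iterative version, for comparison:
def insertion_sort(collection: list) -> list:
    # Swap each element leftwards until the sorted prefix absorbs it
    for i in range(1, len(collection)):
        j = i
        while j > 0 and collection[j - 1] > collection[j]:
            collection[j - 1], collection[j] = collection[j], collection[j - 1]
            j -= 1
    return collection
assert insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]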
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path , map_location = "cpu" , save_path = None ) ->None:
    """simple docstring"""
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
| 258 |
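Usage sketch for the converter above (file names are placeholders): halving a saved state dict roughly halves its size on disk, and fire exposes the same function as a CLI.
# convert("pytorch_model.bin", save_path="pytorch_model_fp16.bin")        # from Python
# python to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin  # from a shell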
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __a ( UpperCAmelCase ) ->Tuple: # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __a ( UpperCAmelCase ) ->int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def A (self : Tuple ):
A = {}
A = []
A = 1
A = [1, 2]
A = {"""a""": 1, """b""": 2}
A = {"""a""": [1, 2], """b""": [3, 4]}
A = {"""a""": {"""1""": 1}, """b""": 2}
A = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
A = {}
A = []
A = 2
A = [2, 3]
A = {"""a""": 2, """b""": 3}
A = {"""a""": [2, 3], """b""": [4, 5]}
A = {"""a""": {"""1""": 2}, """b""": 3}
A = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A = 2
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
A = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
A = {"""a""": 2, """b""": 0, """c""": 2}
A = {
"""a""": np.eye(2 ).astype(_lowerCAmelCase ),
"""b""": np.zeros(3 ).astype(_lowerCAmelCase ),
"""c""": np.ones(2 ).astype(_lowerCAmelCase ),
}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )
def A (self : List[Any] ):
A = {"""a""": 1, """b""": 2}
A = {"""a""": 3, """b""": 4}
A = {"""a""": 5, """b""": 6}
A = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = '''bar'''
A = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(_lowerCAmelCase , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
A = {f"""{i}""": i for i in range(UpperCAmelCase )}
        A = map_nested(lambda x : x + 10 , UpperCAmelCase , num_proc=UpperCAmelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@require_tf
def A (self : Dict ):
import tensorflow as tf
from tensorflow.keras import layers
A = layers.Dense(2 )
def gen_random_output():
A = tf.random.uniform((1, 3) )
return model(_lowerCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def A (self : Tuple ):
import torch
def gen_random_output():
A = torch.nn.Linear(3 , 2 )
A = torch.rand(1 , 3 )
return model(_lowerCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def A (self : str ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A = gen_random_output()
with temp_seed(42 ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).flatten()
assert output == expected_output
def __a ( ) ->Optional[Any]:
"""simple docstring"""
A = A(x=1 , y="""foobar""" )
A = {"""x""": 1, """y""": """foobar"""}
assert asdict(UpperCAmelCase ) == expected_output
A = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
A = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(UpperCAmelCase ) == expected_output
with pytest.raises(UpperCAmelCase ):
asdict([1, A(x=10 , y="""foo""" )] )
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return text.split()
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __a ( ) ->Optional[int]:
"""simple docstring"""
with Pool(2 ) as pool:
A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(UpperCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(UpperCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A = []
for yield_time, content in iflatmap_unordered(
UpperCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(UpperCAmelCase )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(UpperCAmelCase ) == 4
| 258 | 1 |
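A minimal sketch of the nested-mapping behaviour those tests pin down (the real datasets implementation additionally handles numpy arrays, progress bars, and multiprocessing):
def map_nested_simple(fn, data):
    # Recurse through dicts, lists and tuples; apply fn at the leaves
    if isinstance(data, dict):
        return {k: map_nested_simple(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_simple(fn, v) for v in data)
    return fn(data)
assert map_nested_simple(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}) == {"a": [2, 3], "b": {"c": 4}}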
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) ->Any:
"""simple docstring"""
if attention_mask is None:
A = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
A = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
A = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
A = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
A = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int]=13 , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Dict=99 , _lowerCAmelCase : Any=16 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Union[str, Any]="relu" , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : List[Any]=20 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : str=1 , _lowerCAmelCase : List[str]=0 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = encoder_layerdrop
A = decoder_layerdrop
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
def A (self : str ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = self.eos_token_id # Eos Token
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
A = input_ids.clamp(self.pad_token_id + 1 )
A = decoder_input_ids.clamp(self.pad_token_id + 1 )
A = self.get_config()
A = prepare_mam_aaa_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def A (self : int ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def A (self : Tuple ):
A , A = self.prepare_config_and_inputs()
return config, inputs_dict
def A (self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = MaMaaaModel(config=_lowerCAmelCase ).get_decoder().to(_lowerCAmelCase ).eval()
A = inputs_dict["""input_ids"""]
A = inputs_dict["""attention_mask"""]
A = inputs_dict["""head_mask"""]
# first forward pass
A = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , head_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase )
A , A = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) , config.vocab_size )
A = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A = torch.cat([input_ids, next_tokens] , dim=-1 )
A = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )["""last_hidden_state"""]
A = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase )[
"""last_hidden_state"""
]
# select random slice
A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A = output_from_no_past[:, -3:, random_slice_idx].detach()
A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-2 ) )
def A (self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] ):
A = MaMaaaModel(config=_lowerCAmelCase ).to(_lowerCAmelCase ).eval()
A = model(**_lowerCAmelCase )
A = outputs.encoder_last_hidden_state
A = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A = model.get_encoder()
encoder.save_pretrained(_lowerCAmelCase )
A = MaMaaaEncoder.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
A = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
A = model.get_decoder()
decoder.save_pretrained(_lowerCAmelCase )
A = MaMaaaDecoder.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
A = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __UpperCAmelCase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__lowerCAmelCase = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def A (self : Dict ):
A = MaMaaaModelTester(self )
A = ConfigTester(self , config_class=_lowerCAmelCase )
def A (self : str ):
self.config_tester.run_common_tests()
def A (self : Union[str, Any] ):
A , A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
A , A = model_class.from_pretrained(_lowerCAmelCase , output_loading_info=_lowerCAmelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def A (self : List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_lowerCAmelCase )
def A (self : Optional[Any] ):
A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_lowerCAmelCase )
def A (self : Optional[int] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
if not self.is_encoder_decoder:
A = inputs["""input_ids"""]
del inputs["input_ids"]
else:
A = inputs["""input_ids"""]
A = inputs.get("""decoder_input_ids""" , _lowerCAmelCase )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , _lowerCAmelCase )
A = model.get_input_embeddings()
if not self.is_encoder_decoder:
A = wte(_lowerCAmelCase )
else:
A = wte(_lowerCAmelCase )
A = wte(_lowerCAmelCase )
with torch.no_grad():
model(**_lowerCAmelCase )[0]
def A (self : Dict ):
A , A = self.model_tester.prepare_config_and_inputs()
A = input_dict["""input_ids"""]
A = input_ids.ne(1 ).to(_lowerCAmelCase )
A = MaMaaaForConditionalGeneration(_lowerCAmelCase ).eval().to(_lowerCAmelCase )
if torch_device == "cuda":
model.half()
model.generate(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
model.generate(num_beams=4 , do_sample=_lowerCAmelCase , early_stopping=_lowerCAmelCase , num_return_sequences=3 )
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return torch.tensor(UpperCAmelCase , dtype=torch.long , device=UpperCAmelCase )
_lowerCamelCase : List[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A (self : Union[str, Any] ):
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def A (self : Optional[Any] ):
A = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCAmelCase )
A = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
A = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
A = prepare_mam_aaa_inputs_dict(model.config , _lowerCAmelCase , _lowerCAmelCase )
with torch.no_grad():
A = model(**_lowerCAmelCase )[0]
A = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , _lowerCAmelCase )
# change to expected output here
A = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def A (self : Tuple ):
A = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCAmelCase )
# change to intended input
A = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
A = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
A = prepare_mam_aaa_inputs_dict(model.config , _lowerCAmelCase , _lowerCAmelCase )
with torch.no_grad():
A = model(**_lowerCAmelCase )[0]
A = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
# change to expected output here
A = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def A (self : Optional[Any] ):
A = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(_lowerCAmelCase )
A = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
A = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
A = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""pt""" )
A = model.generate(
input_ids=dct["""input_ids"""].to(_lowerCAmelCase ) , attention_mask=dct["""attention_mask"""].to(_lowerCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
A = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
A = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
assert generated == expected_en
| 258 |
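A hedged end-to-end usage sketch matching the translation integration test above (same public checkpoint as in the test; requires a network download):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tokenizer("La vie est belle.", return_tensors="pt")
generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))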
'''simple docstring'''
def or_gate( input_1 , input_2 ) ->int:
    """simple docstring"""
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate( ) ->None:
    """simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 258 | 1 |
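The tuple-counting trick above extends to the other basic gates; an illustrative sketch in the same style:
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)
def xor_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) == 1)
assert (and_gate(1, 1), and_gate(1, 0)) == (1, 0)
assert (xor_gate(1, 0), xor_gate(1, 1)) == (1, 0)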
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 258 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : str , _lowerCAmelCase : PriorTransformer , _lowerCAmelCase : CLIPVisionModel , _lowerCAmelCase : CLIPImageProcessor , _lowerCAmelCase : HeunDiscreteScheduler , _lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def A (self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
if latents is None:
A = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
A = latents.to(_lowerCAmelCase )
A = latents * scheduler.init_noise_sigma
return latents
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A = torch.device(F"""cuda:{gpu_id}""" )
A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def A (self : Optional[Any] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def A (self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
A = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
A = self.image_processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
A = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
A = self.image_encoder(_lowerCAmelCase )["""last_hidden_state"""]
A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
A = torch.zeros_like(_lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__(self : List[Any] , _lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 25 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
A = 1
elif isinstance(_lowerCAmelCase , torch.Tensor ):
A = image.shape[0]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A = len(_lowerCAmelCase )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCAmelCase )}""" )
A = self._execution_device
A = batch_size * num_images_per_prompt
A = guidance_scale > 1.0
A = self._encode_image(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# prior
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
A = self.scheduler.timesteps
A = self.prior.config.num_embeddings
A = self.prior.config.embedding_dim
A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A = latents.reshape(latents.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
A = self.prior(
_lowerCAmelCase , timestep=_lowerCAmelCase , proj_embedding=_lowerCAmelCase , ).predicted_image_embedding
# remove the variance
A , A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
A , A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A = self.scheduler.step(
_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCAmelCase )
A = []
for i, latent in enumerate(_lowerCAmelCase ):
A = self.renderer.decode(
latent[None, :] , _lowerCAmelCase , size=_lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCAmelCase )
A = torch.stack(_lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
A = images.cpu().numpy()
if output_type == "pil":
A = [self.numpy_to_pil(_lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCAmelCase )
| 258 | 1 |
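The guidance arithmetic inside the denoising loop above is worth isolating; a minimal sketch of classifier-free guidance (batch layout assumed: unconditional half first, conditional half second):
import torch
def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # Steer the conditional prediction away from the unconditional one
    noise_uncond, noise_cond = noise_pred.chunk(2)
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)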
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
_lowerCamelCase : Optional[Any] = 'src/transformers'
# Matches is_xxx_available()
_lowerCamelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_lowerCamelCase : Optional[Any] = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowerCamelCase : Tuple = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_lowerCamelCase : Optional[int] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_lowerCamelCase : str = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowerCamelCase : List[str] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowerCamelCase : Optional[int] = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowerCamelCase : List[Any] = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_lowerCamelCase : Any = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_lowerCamelCase : Optional[int] = re.compile(R'^\s*try:')
# Catches a line with else:
_lowerCamelCase : Union[str, Any] = re.compile(R'^\s*else:')
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
if _re_test_backend.search(UpperCAmelCase ) is None:
return None
A = [b[0] for b in _re_backend.findall(UpperCAmelCase )]
backends.sort()
return "_and_".join(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
with open(UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = 0
while line_index < len(UpperCAmelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
A = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
A = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCAmelCase ):
A = _re_one_line_import_struct.search(UpperCAmelCase ).groups()[0]
            A = re.findall(R"""\[([^\]]+)\]""" , UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
A = _re_import_struct_key_value.search(UpperCAmelCase )
if single_line_import_search is not None:
A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
A = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
A = lines[line_index]
if _re_import_struct_add_one.search(UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCAmelCase ) is not None:
A = _re_import_struct_add_many.search(UpperCAmelCase ).groups()[0].split(""", """ )
A = [obj[1:-1] for obj in imports if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif _re_between_brackets.search(UpperCAmelCase ) is not None:
A = _re_between_brackets.search(UpperCAmelCase ).groups()[0].split(""", """ )
A = [obj[1:-1] for obj in imports if len(UpperCAmelCase ) > 0]
objects.extend(UpperCAmelCase )
elif _re_quote_object.search(UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(UpperCAmelCase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
A = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A = []
while (
line_index < len(UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
A = lines[line_index]
A = _re_import.search(UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
A = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
A = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
A = lines[line_index]
A = _re_import.search(UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
A = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __a ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
def find_duplicates(UpperCAmelCase ):
return [k for k, v in collections.Counter(UpperCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A = []
for key in import_dict_objects.keys():
A = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
A = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A = """base imports""" if key == """none""" else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __a ( ) ->Optional[Any]:
"""simple docstring"""
A = []
for root, _, files in os.walk(UpperCAmelCase ):
if "__init__.py" in files:
A = os.path.join(UpperCAmelCase , """__init__.py""" )
A = parse_init(UpperCAmelCase )
if objects is not None:
A = analyze_results(*UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
A = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("""\n""".join(UpperCAmelCase ) )
if len(UpperCAmelCase ) > 0:
raise ValueError("""\n\n""".join(UpperCAmelCase ) )
def __a ( ) ->List[str]:
"""simple docstring"""
A = []
for path, directories, files in os.walk(UpperCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(UpperCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCAmelCase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
A = str((Path(UpperCAmelCase ) / folder).relative_to(UpperCAmelCase ) )
A = short_path.replace(os.path.sep , """.""" )
submodules.append(UpperCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
A = str((Path(UpperCAmelCase ) / fname).relative_to(UpperCAmelCase ) )
A = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(UpperCAmelCase )
return submodules
_lowerCamelCase : int = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def __a ( ) ->Optional[int]:
"""simple docstring"""
A = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(UpperCAmelCase , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A = spec.loader.load_module()
A = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(UpperCAmelCase ) > 0:
A = """\n""".join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
f"""{list_of_modules}\n"""
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 258 |
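A quick standalone illustration of what the backend-detection regex in the script above extracts (re-created here, not imported from the script):
import re
_re_backend = re.compile(R"is\_([a-z_]*)_available")
line = "    if not is_torch_available() and not is_tf_available():"
backends = sorted(_re_backend.findall(line))
print("_and_".join(backends))  # -> tf_and_torch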
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Union[str, Any] ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
A = model
A = kwargs.get("""model_save_dir""" , _lowerCAmelCase )
A = kwargs.get("""latest_model_name""" , _lowerCAmelCase )
def __call__(self : Tuple , **_lowerCAmelCase : Optional[Any] ):
A = {k: np.array(_lowerCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_lowerCAmelCase , _lowerCAmelCase )
@staticmethod
def A (_lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
A = """CPUExecutionProvider"""
return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : List[str] ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A = self.model_save_dir.joinpath(self.latest_model_name )
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A = self.model_save_dir.joinpath(_lowerCAmelCase )
if src_path.exists():
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
def A (self : Tuple , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : str , ):
if os.path.isfile(_lowerCAmelCase ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# saving model weights/files
self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Union[bool, str, None]] = None , _lowerCAmelCase : Optional[Union[str, None]] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional["ort.SessionOptions"] = None , **_lowerCAmelCase : Optional[int] , ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowerCAmelCase ):
A = OnnxRuntimeModel.load_model(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
A = Path(_lowerCAmelCase )
# load model from hub
else:
# download model
A = hf_hub_download(
repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , )
A = Path(_lowerCAmelCase ).parent
A = Path(_lowerCAmelCase ).name
A = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
return cls(model=_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : str , ):
A = None
if len(str(_lowerCAmelCase ).split("""@""" ) ) == 2:
A , A = model_id.split("""@""" )
return cls._from_pretrained(
model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
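# --- Editor's usage sketch (not part of the original file) ---
# A minimal sketch of how a wrapper like the one above is driven: build an
# onnxruntime InferenceSession and feed it numpy arrays keyed by input name.
# "model.onnx" is a placeholder path, and float32 inputs are an assumption.
if __name__ == "__main__":
    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession("model.onnx" , providers=["CPUExecutionProvider"] )
    # build zero-filled dummy inputs, treating dynamic dimensions as size 1
    feeds = {
        inp.name: np.zeros([d if isinstance(d , int ) else 1 for d in inp.shape] , dtype=np.float32 )
        for inp in session.get_inputs()
    }
    outputs = session.run(None , feeds )
    print([out.shape for out in outputs] )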
| 258 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = CycleDiffusionPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A (self : int ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A = CLIPTextModel(_lowerCAmelCase )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A (self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=0 ):
A = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
A = image / 2 + 0.5
if str(_lowerCAmelCase ).startswith("""mps""" ):
A = torch.manual_seed(_lowerCAmelCase )
else:
A = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
A = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def A (self : Any ):
A = """cpu""" # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A (self : str ):
A = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowerCAmelCase , """half""" ):
A = module.half()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A (self : Optional[int] ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def A (self : Optional[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def A (self : Dict ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A (self : Optional[Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A (self : Optional[int] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
assert np.abs(image - expected_image ).max() < 2e-2
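# --- Editor's sketch (not part of the original tests) ---
# The tests above repeatedly compare a 3x3 corner of the generated image
# against a hard-coded slice; this helper shows that pattern in isolation,
# exercised with random data so the snippet is self-contained.
import numpy as np

def check_image_slice(images , expected_slice , atol=1e-2 ):
    # images is NHWC; take a 3x3 window from the last channel of image 0,
    # exactly as `images[0, -3:, -3:, -1]` does in the tests above
    image_slice = images[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice ).max() < atol

images = np.random.rand(1 , 32 , 32 , 3 ).astype(np.float32 )
check_image_slice(images , images[0, -3:, -3:, -1].flatten() )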
| 258 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A (self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
        example_video_filepath = hf_hub_download(
            repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        video_classifier = VideoClassificationPipeline(model=_lowerCAmelCase , image_processor=_lowerCAmelCase , top_k=2 )
A = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def A (self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
for example in examples:
A = video_classifier(_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
] , )
@require_torch
def A (self : Optional[Any] ):
A = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
A = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
A = pipeline(
"""video-classification""" , model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , frame_sampling_rate=4 )
A = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
A = video_classifier(_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
A = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def A (self : List[Any] ):
pass
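# --- Editor's usage sketch (not part of the original tests) ---
# Outside the test harness, the same pipeline is built with the high-level
# `pipeline` factory; the checkpoint below is the tiny test model used above
# and "archery.mp4" stands in for any local video file (decord is required).
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("video-classification" , model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification" )
    print(classifier("archery.mp4" , top_k=2 ) )  # -> [{"score": ..., "label": ...}, ...]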
| 258 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset , **kwargs ):
    """simple docstring"""
    A = dataset.map(**kwargs )
@get_duration
def filter(dataset , **kwargs ):
    """simple docstring"""
    A = dataset.filter(**kwargs )
def benchmark_map_filter( ):
    """simple docstring"""
    # NOTE (editor): the benchmark labels below are descriptive reconstructions;
    # the original dict keys were lost to placeholder renaming in this dump.
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples["""text"""] )
        times["""map identity"""] = map(dataset )
        times["""map identity batched"""] = map(dataset , batched=True )
        times["""map no-op batched"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["""map no-op batched numpy"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["""map no-op batched pandas"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            times["""map no-op batched pytorch"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            times["""map no-op batched tensorflow"""] = map(dataset , function=lambda x : None , batched=True )
        times["""map fast-tokenizer batched"""] = map(dataset , function=tokenize , batched=True )
        times["""filter"""] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
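# --- Editor's sketch (assumption, not shown in this file) ---
# `get_duration` is imported from `utils` above but not shown here; a plausible
# implementation times the wrapped call and returns the elapsed seconds, which
# is what lets `times[...] = map(dataset)` store a duration.
import functools
import time

def get_duration(func ):
    @functools.wraps(func )
    def wrapper(*args , **kwargs ):
        start = time.perf_counter()
        func(*args , **kwargs )
        return time.perf_counter() - start
    return wrapper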
| 258 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left , right , array , target ) ->int:
    """simple docstring"""
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array , target ) ->int:
    """simple docstring"""
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search( left , right , array , target ) ->int:
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print('Not found')
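# --- Editor's worked example (not part of the original script) ---
# In [1, 3, 5, 7, 9, 11] the target 7 sits at index 3; with precision = 10 and
# only six elements, both variants immediately fall back to `lin_search`.
data = [1, 3, 5, 7, 9, 11]
assert ite_ternary_search(data , 7 ) == 3
assert rec_ternary_search(0 , len(data ) - 1 , data , 7 ) == 3
assert ite_ternary_search(data , 4 ) == -1  # absent values return -1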
| 258 | 1 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result( ) ->None:
    """simple docstring"""
    num_nodes , num_edges = 9, 14 # noqa: F841
A = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
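# --- Editor's sketch (not part of the original test) ---
# A complementary check: the eight expected edges above carry total weight
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37, the minimum spanning tree cost of this graph.
def mst_total_cost(edges ):
    # each edge is [node_u, node_v, cost]
    return sum(cost for _u, _v, cost in edges )

assert mst_total_cost([[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]] ) == 37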
| 258 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = CycleDiffusionPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A (self : int ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A = CLIPTextModel(_lowerCAmelCase )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A (self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=0 ):
A = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
A = image / 2 + 0.5
if str(_lowerCAmelCase ).startswith("""mps""" ):
A = torch.manual_seed(_lowerCAmelCase )
else:
A = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
A = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def A (self : Any ):
A = """cpu""" # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A (self : str ):
A = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowerCAmelCase , """half""" ):
A = module.half()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A (self : Optional[int] ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def A (self : Optional[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def A (self : Dict ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A (self : Optional[Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A (self : Optional[int] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 258 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''encodec'''
def __init__(self : List[Any] , _lowerCAmelCase : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowerCAmelCase : List[str]=2_4000 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Dict=128 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : int=[8, 5, 4, 2] , _lowerCAmelCase : Any="weight_norm" , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : str="reflect" , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Tuple=1.0 , _lowerCAmelCase : Tuple=1024 , _lowerCAmelCase : str=None , _lowerCAmelCase : Dict=True , **_lowerCAmelCase : List[str] , ):
A = target_bandwidths
A = sampling_rate
A = audio_channels
A = normalize
A = chunk_length_s
A = overlap
A = hidden_size
A = num_filters
A = num_residual_layers
A = upsampling_ratios
A = norm_type
A = kernel_size
A = last_kernel_size
A = residual_kernel_size
A = dilation_growth_rate
A = use_causal_conv
A = pad_mode
A = compress
A = num_lstm_layers
A = trim_right_ratio
A = codebook_size
A = codebook_dim if codebook_dim is not None else hidden_size
A = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}""" )
super().__init__(**_lowerCAmelCase )
@property
def A (self : List[Any] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A (self : Union[str, Any] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def A (self : Any ):
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
@property
def A (self : List[str] ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
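# --- Editor's worked example (not part of the original file) ---
# The derived properties above, evaluated with the 24 kHz defaults:
# hop_length = prod(upsampling_ratios) = 8 * 5 * 4 * 2 = 320, so
# frame_rate = ceil(24000 / 320) = 75 frames per second.
if __name__ == "__main__":
    hop_length = int(np.prod([8, 5, 4, 2] ) )
    frame_rate = math.ceil(24_000 / hop_length )
    assert (hop_length, frame_rate) == (320, 75)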
| 258 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Tuple , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str] ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params
def __call__(self : str , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : int ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
    def preprocess(self , image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward(self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess(self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
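# --- Editor's usage sketch (not part of the original file) ---
# Driving the pipeline above directly; "cat.jpg" is a placeholder for any
# local image and the checkpoint is one public image-classification model.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification" , model="google/vit-base-patch16-224" )
    print(classifier("cat.jpg" , top_k=5 ) )  # -> [{"score": ..., "label": ...}, ...]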
| 258 | 1 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : int = 32 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[int, float] = 1 / 255 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowerCAmelCase : Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Any=7 , _lowerCAmelCase : Tuple=30 , _lowerCAmelCase : Optional[Any]=400 , _lowerCAmelCase : Optional[int]=3 , ):
A = parent
A = do_resize
A = size if size is not None else {"""shortest_edge""": 288}
A = size_divisor
A = do_rescale
A = rescale_factor
A = do_normalize
A = do_center_crop
A = image_mean
A = image_std
A = do_pad
A = batch_size
A = num_channels
A = min_resolution
A = max_resolution
def A (self : Union[str, Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self , image_inputs , batched=False ):
        if not batched:
            size = self.size["""shortest_edge"""]
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            max_size = int((1333 / 800) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            newh , neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height , expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BridgeTowerImageProcessor if is_vision_available() else None
def A (self : Optional[Any] ):
A = BridgeTowerImageProcessingTester(self )
@property
def A (self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def A (self : Tuple ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size_divisor""" ) )
def A (self : Optional[int] ):
pass
def A (self : Any ):
# Initialize image processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A (self : List[str] ):
# Initialize image processor
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A (self : Optional[Any] ):
# Initialize image processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
A , A = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
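# --- Editor's worked example (not part of the original tests) ---
# The shortest-edge resize logic checked by `get_expected_values` above, traced
# by hand: a 480x640 (h x w) image with shortest_edge=288 and size_divisor=32
# scales by 288/480 = 0.6 to 288x384; the 1333/800 cap does not trigger, and
# both sides are already multiples of 32.
size , size_divisor = 288 , 32
h , w = 480 , 640
scale = size / min(w , h )  # 0.6
newh , neww = (size, scale * w) if h < w else (scale * h, size)
newh , neww = int(newh + 0.5 ) , int(neww + 0.5 )
assert (newh // size_divisor * size_divisor, neww // size_divisor * size_divisor) == (288, 384)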
| 258 |
'''simple docstring'''
def speed_of_sound_in_a_fluid( density , bulk_modulus ) ->float:
    """simple docstring"""
    # name restored by the editor; the function computes c = sqrt(bulk_modulus / density)
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
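# --- Editor's worked example (not part of the original script; the function
# name matches the editor's restoration above) ---
# For water, bulk modulus K ~ 2.15e9 Pa and density rho ~ 998 kg/m^3 give
# c = sqrt(K / rho) ~ 1468 m/s, close to the measured speed of sound in water.
speed_in_water = speed_of_sound_in_a_fluid(density=998.0 , bulk_modulus=2.15e9 )
assert 1400 < speed_in_water < 1550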
| 258 | 1 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort( collection , n ) ->None:
    """simple docstring"""
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next( collection , index ) ->None:
    """simple docstring"""
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
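# --- Editor's worked example (not part of the original script) ---
# The sort works in place: insert_next bubbles element n-1 forward, then the
# recursion handles the first n-1 elements.
sample = [5, 2, 4, 1]
rec_insertion_sort(sample , len(sample ) )
assert sample == [1, 2, 4, 5]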
| 258 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BioGptTokenizer
__lowerCAmelCase = False
def A (self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def A (self : Tuple , _lowerCAmelCase : List[str] ):
A = """lower newer"""
A = """lower newer"""
return input_text, output_text
def A (self : List[Any] ):
A = BioGptTokenizer(self.vocab_file , self.merges_file )
A = """lower"""
A = ["""low""", """er</w>"""]
A = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
A = tokens + ["""<unk>"""]
A = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@slow
def A (self : Union[str, Any] ):
A = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
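# --- Editor's sketch (not part of the original tests) ---
# A toy re-implementation of the greedy BPE merging that the test vocabulary
# above relies on (it is NOT the tokenizer's actual code): with the merges
# "l o", "lo w" and "e r</w>", the word "lower" becomes ["low", "er</w>"].
def apply_bpe(symbols , merges ):
    symbols = list(symbols )
    while True:
        # collect adjacent pairs that have a merge rule, ranked by priority
        candidates = [(merges.index(pair ), i ) for i, pair in enumerate(zip(symbols , symbols[1:] ) ) if pair in merges]
        if not candidates:
            return symbols
        _ , i = min(candidates )
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]

merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
assert apply_bpe(["l", "o", "w", "e", "r</w>"] , merges ) == ["low", "er</w>"]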
| 258 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
_lowerCamelCase : List[Any] = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_lowerCamelCase : Any = 'UperNetConfig'
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__(self , in_channels : int , out_channels : int , kernel_size : Union[int, Tuple[int, int]] , padding : Union[int, Tuple[int, int], str] = 0 , bias : bool = False , dilation : Union[int, Tuple[int, int]] = 1 , ):
        super().__init__()
        self.conv = nn.Convad(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNormad(out_channels )
        self.activation = nn.ReLU()
    def forward(self , input : torch.Tensor ):
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__(self , pool_scale : int , in_channels : int , channels : int ):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPoolad(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )
    def forward(self , input : torch.Tensor ):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__(self , pool_scales : Tuple[int, ...] , in_channels : int , channels : int , align_corners : bool ):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward(self , x : torch.Tensor ):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__(self , config , in_channels ):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]: # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights(self ):
        self.apply(self._init_weights )
    def _init_weights(self , module ):
        if isinstance(module , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward(self , inputs ):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward(self , encoder_hidden_states : torch.Tensor ):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode="""bilinear""" , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__(self , config , in_index : int = 2 , kernel_size : int = 3 , dilation : Union[int, Tuple[int, int]] = 1 ):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
    def init_weights(self ):
        self.apply(self._init_weights )
    def _init_weights(self , module ):
        if isinstance(module , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self , encoder_hidden_states : torch.Tensor ):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = UperNetConfig
__lowerCAmelCase = '''pixel_values'''
__lowerCAmelCase = True
def A (self : Tuple , _lowerCAmelCase : Union[str, Any] ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def A (self : List[str] ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def A (self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : str=False ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A = value
_lowerCamelCase : Any = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowerCamelCase : Union[str, Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''' , A__ , )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
    def __init__(self , config ):
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward(self , pixel_values : Optional[torch.Tensor] = None , output_attentions : Optional[bool] = None , output_hidden_states : Optional[bool] = None , labels : Optional[torch.Tensor] = None , return_dict : Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("""The number of labels should be greater than one""" )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
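# --- Editor's sketch (not part of the original file) ---
# What one branch of UperNetPyramidPoolingModule above computes, stripped of
# the BatchNorm/ReLU inside UperNetConvModule: pool the feature map to a fixed
# grid, project it with a 1x1 convolution, and upsample back to the input size.
if __name__ == "__main__":
    x = torch.randn(1 , 64 , 32 , 32 )
    pooled = nn.AdaptiveAvgPool2d(2 )(x )                      # -> (1, 64, 2, 2)
    projected = nn.Conv2d(64 , 16 , kernel_size=1 )(pooled )   # -> (1, 16, 2, 2)
    upsampled = nn.functional.interpolate(projected , size=x.shape[2:] , mode="bilinear" , align_corners=False )
    assert upsampled.shape == (1, 16, 32, 32)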
| 258 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu( vector ) ->np.ndarray:
    """simple docstring"""
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 258 | 1 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowerCamelCase : Tuple = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__lowerCAmelCase = None
def _generate_iterable_examples( df , partition_order , ):
"""simple docstring"""
import pyspark
def generate_fn():
A = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
A = df_with_partition_id.select("""*""" ).where(f"""part_id = {partition_id}""" ).drop("""part_id""" )
A = partition_df.collect()
A = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class __UpperCAmelCase ( _BaseExamplesIterable ):
'''simple docstring'''
    def __init__(self , df : "pyspark.sql.DataFrame" , partition_order : List[Any]=None , ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__(self : str ):
yield from self.generate_examples_fn()
def A (self : Union[str, Any] , _lowerCAmelCase : np.random.Generator ):
A = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
A = self.split_shard_indices_by_worker(_lowerCAmelCase , _lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=_lowerCAmelCase )
@property
def A (self : List[str] ):
return len(self.partition_order )
class __UpperCAmelCase ( datasets.DatasetBuilder ):
'''simple docstring'''
__lowerCAmelCase = SparkConfig
    def __init__(self , df : "pyspark.sql.DataFrame" , cache_dir : str = None , working_dir : str = None , **kwargs , ):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **kwargs , )
def A (self : Union[str, Any] ):
# Returns the path of the created file.
def create_cache_and_write_probe(_lowerCAmelCase : List[Any] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=_lowerCAmelCase )
A = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_lowerCAmelCase , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
A = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_lowerCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def A (self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def A (self : Tuple , _lowerCAmelCase : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def A (self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(_lowerCAmelCase : List[Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
A = self.df.count()
A = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
A = (
self.df.limit(_lowerCAmelCase )
.repartition(1 )
.mapInArrow(_lowerCAmelCase , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
A = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
A = min(_lowerCAmelCase , int(approx_total_size / max_shard_size ) )
A = self.df.repartition(_lowerCAmelCase )
def _prepare_split_single(self , fpath : str , file_format : str , max_shard_size : int , ):
import pyspark
writer_class = ParquetWriter if file_format == """parquet""" else ArrowWriter
working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
embed_local_files = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
features = self.config.features
writer_batch_size = self._writer_batch_size
storage_options = self._fs.storage_options
def write_arrow(it ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
task_id = pyspark.TaskContext().taskAttemptId()
first_batch = next(it , None )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id = 0
writer = writer_class(
features=features , path=working_fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
table = pa.Table.from_batches([first_batch] )
writer.write_table(table )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
writer = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
table = pa.Table.from_batches([batch] )
writer.write_table(table )
if writer._num_bytes > 0:
num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(working_fpath ) ):
dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
shutil.move(file , dest )
stats = (
self.df.mapInArrow(write_arrow , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _prepare_split(self , split_generator : "datasets.SplitGenerator" , file_format : str = "arrow" , max_shard_size : Optional[Union[str, int]] = None , num_proc : Optional[int] = None , **kwargs , ):
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(max_shard_size )
is_local = not is_remote_filesystem(self._fs )
path_join = os.path.join if is_local else posixpath.join
SUFFIX = """-TTTTT-SSSSS-of-NNNNN"""
fname = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
fpath = path_join(self._output_dir , fname )
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
num_examples , num_bytes , num_shards , shard_lengths = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(shard_lengths )
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
task_id : int , shard_id : int , global_shard_id : int , ):
rename(
fs , fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , F"""{global_shard_id:05d}""" ).replace("""NNNNN""" , F"""{total_shards:05d}""" ) , )
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards ) ):
task_id , num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace(SUFFIX , """""" ) , )
def _get_examples_iterable_for_split(self , split_generator : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
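# Standalone sketch (not code from the builder above) of how the
# "-TTTTT-SSSSS-of-NNNNN" template used by _prepare_split resolves: each task
# writes shards tagged with its task id (TTTTT) and a local shard id (SSSSS),
# then files are renamed to a flat global SSSSS-of-NNNNN numbering. All names
# and values below are illustrative.
fpath = "my_dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"
task_id, shard_id, global_shard_id, total_shards = 3, 0, 7, 12
src = fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")
dst = fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")
print(src)  # my_dataset-train-00003-00000-of-NNNNN.arrow
print(dst)  # my_dataset-train-00007-of-00012.arrow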
| 258 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=13 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : str=[1, 2, 1] , _lowerCAmelCase : List[Any]=[2, 2, 4] , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Any=2.0 , _lowerCAmelCase : Any=True , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=10 , _lowerCAmelCase : int=8 , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = num_heads
A = window_size
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = use_absolute_embeddings
A = patch_norm
A = layer_norm_eps
A = initializer_range
A = is_training
A = scope
A = use_labels
A = type_sequence_label_size
A = encoder_stride
def A (self : Dict ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def A (self : Optional[Any] ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A (self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] ):
A = SwinvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase )
A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A (self : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
A = SwinvaForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A = 1
A = SwinvaForMaskedImageModeling(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A (self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ):
A = self.type_sequence_label_size
A = SwinvaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A (self : Union[str, Any] ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {"""pixel_values""": pixel_values}
return config, inputs_dict
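# Worked check (standalone arithmetic, not part of the test suite) of the
# shape expectations used in create_and_check_model above, with the tester
# defaults image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]:
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2               # 16 * 16 = 256
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # 256 // 16 = 16
expected_dim = embed_dim * 2 ** (len(depths) - 1)           # 16 * 4 = 64
print(expected_seq_len, expected_dim)  # 16 64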
@require_torch
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__lowerCAmelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Any ):
A = SwinvaModelTester(self )
A = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )
def A (self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A (self : int ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A (self : Dict ):
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A (self : Optional[int] ):
pass
def A (self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def A (self : Optional[int] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def A (self : int ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
A = True
A = False
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
A = len(self.model_tester.depths )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = config.window_size**2
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
A = len(_lowerCAmelCase )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
A = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
A = 2
self.assertEqual(out_len + added_hidden_states , len(_lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def A (self : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.hidden_states
A = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swinv2 has a different seq_length
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
A = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
A , A , A , A = reshaped_hidden_states[0].shape
A = (
reshaped_hidden_states[0].view(_lowerCAmelCase , _lowerCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A (self : Tuple ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A (self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
def A (self : Optional[int] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def A (self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def A (self : Optional[Any] ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = SwinvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def A (self : Optional[Any] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
A = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A (self : List[str] ):
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A (self : List[str] ):
A = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
_lowerCAmelCase )
A = self.default_image_processor
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
A = model(**_lowerCAmelCase )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
A = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 258 | 1 |
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod by recursive binary exponentiation, O(log n)."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701
a = 10_0000_0000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
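# Standalone cross-check: Python's built-in three-argument pow performs the
# same modular exponentiation, so the Fermat-little-theorem inverse above can
# be verified without the helper.
assert pow(b, p - 2, p) == binary_exponentiation(b, p - 2, p)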
| 258 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Compute the Jaro-Winkler similarity of two strings
    (1.0 means identical, 0.0 means no similarity).
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            # Only look for l inside a window of +/- limit around position i.
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # Blank out the matched character so it cannot match twice.
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
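# Classic worked example (standalone): "martha" vs "marhta" share all six
# characters with one transposition and a 3-character common prefix, giving
# jaro = 17/18 and a Jaro-Winkler score of about 0.9611.
# print(jaro_winkler("martha", "marhta"))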
| 258 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = KandinskyVaaPriorPipeline
__lowerCAmelCase = ['''prompt''']
__lowerCAmelCase = ['''prompt''', '''negative_prompt''']
__lowerCAmelCase = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__lowerCAmelCase = False
@property
def A (self : Tuple ):
return 32
@property
def A (self : Union[str, Any] ):
return 32
@property
def A (self : List[str] ):
return self.time_input_dim
@property
def A (self : List[Any] ):
return self.time_input_dim * 4
@property
def A (self : Optional[int] ):
return 100
@property
def A (self : Dict ):
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def A (self : Dict ):
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_lowerCAmelCase )
@property
def A (self : Optional[int] ):
torch.manual_seed(0 )
A = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A = PriorTransformer(**_lowerCAmelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
A = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def A (self : Any ):
torch.manual_seed(0 )
A = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A = CLIPVisionModelWithProjection(_lowerCAmelCase )
return model
@property
def A (self : Union[str, Any] ):
A = CLIPImageProcessor(
crop_size=224 , do_center_crop=_lowerCAmelCase , do_normalize=_lowerCAmelCase , do_resize=_lowerCAmelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
def A (self : int ):
A = self.dummy_prior
A = self.dummy_image_encoder
A = self.dummy_text_encoder
A = self.dummy_tokenizer
A = self.dummy_image_processor
A = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowerCAmelCase , clip_sample_range=10.0 , )
A = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def A (self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : str=0 ):
if str(_lowerCAmelCase ).startswith("""mps""" ):
A = torch.manual_seed(_lowerCAmelCase )
else:
A = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
A = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def A (self : Dict ):
A = """cpu"""
A = self.get_dummy_components()
A = self.pipeline_class(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
A = output.image_embeds
A = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
A = image[0, -10:]
A = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A (self : Optional[Any] ):
A = torch_device == """cpu"""
A = True
A = False
self._test_inference_batch_single_identical(
test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , test_mean_pixel_difference=_lowerCAmelCase , )
@skip_mps
def A (self : Optional[int] ):
A = torch_device == """cpu"""
A = False
self._test_attention_slicing_forward_pass(
test_max_difference=_lowerCAmelCase , test_mean_pixel_difference=_lowerCAmelCase , )
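# Standalone sketch of the device-aware seeding pattern used in
# get_dummy_inputs above: torch.Generator(device=...) is not supported on
# MPS, so tests fall back to the global torch.manual_seed there. The helper
# name below is illustrative.
import torch

def make_generator(device: str, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default (CPU) generator
    return torch.Generator(device=device).manual_seed(seed)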
| 258 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_lowerCamelCase : List[str] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_lowerCamelCase : List[Any] = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_lowerCamelCase : Dict = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def _compute(self , predictions , references ):
pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
score = evaluate(dataset=dataset , predictions=pred_dict )
return score
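# Standalone illustration of the reshaping done in _compute above: flat
# reference dicts are wrapped into the nested SQuAD-style dataset layout that
# the official evaluate() script expects (ids and texts below are made up).
references = [{"id": "q1", "answers": {"text": ["The seller:"], "answer_start": [143]}}]
dataset = [
    {
        "paragraphs": [
            {
                "qas": [
                    {"answers": [{"text": t} for t in ref["answers"]["text"]], "id": ref["id"]}
                    for ref in references
                ]
            }
        ]
    }
]
print(dataset[0]["paragraphs"][0]["qas"][0]["id"])  # q1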
| 258 | 1 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowerCamelCase : Union[str, Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
_lowerCamelCase : Union[str, Any] = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.1_5},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
_lowerCamelCase : int = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_lowerCamelCase : str = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
_lowerCamelCase : int = 'allenai'
def rewrite_dict_keys(d ):
"""
Replace fairseq's trailing "@@" continuation markers with word-final "</w>"
markers, then restore the special tokens untouched.
"""
d2 = dict((re.sub(R"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , k ), v) for k, v in d.items() )
keep_keys = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del d2[F"""{k}</w>"""]
d2[k] = d[k]  # restore
return d2
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
assert os.path.exists(UpperCAmelCase )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
A = basename(UpperCAmelCase )
A = dirname(UpperCAmelCase )
A = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
A = cls.hub_models()
A = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
A = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
A = hub_utils.from_pretrained(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , archive_map=UpperCAmelCase , **UpperCAmelCase )
A = vars(chkpt["""args"""]["""model"""] )
A = args["""source_lang"""]
A = args["""target_lang"""]
A = dirname(UpperCAmelCase )
A = basename(UpperCAmelCase )
# dicts
A = os.path.join(UpperCAmelCase , f"""dict.{src_lang}.txt""" )
A = os.path.join(UpperCAmelCase , f"""dict.{tgt_lang}.txt""" )
A = Dictionary.load(UpperCAmelCase )
A = rewrite_dict_keys(src_dict.indices )
A = len(UpperCAmelCase )
A = os.path.join(UpperCAmelCase , """vocab-src.json""" )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
A = True
for k in src_vocab.keys():
if not k.islower():
A = False
break
A = Dictionary.load(UpperCAmelCase )
A = rewrite_dict_keys(tgt_dict.indices )
A = len(UpperCAmelCase )
A = os.path.join(UpperCAmelCase , """vocab-tgt.json""" )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# merges_file (bpecodes)
A = os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
A = os.path.join(UpperCAmelCase , UpperCAmelCase )
if os.path.exists(UpperCAmelCase ):
break
with open(UpperCAmelCase , encoding="""utf-8""" ) as fin:
A = fin.read()
A = re.sub(R""" \d+$""" , """""" , UpperCAmelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fout:
fout.write(UpperCAmelCase )
# model config
A = os.path.join(UpperCAmelCase , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args["bpe"]}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args["tokenizer"]}"""
A = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
A = 5
A = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
A = best_score_hparams[model_dir]["""length_penalty"""]
else:
A = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# tokenizer config
A = os.path.join(UpperCAmelCase , UpperCAmelCase )
A = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , ensure_ascii=UpperCAmelCase , indent=UpperCAmelCase ) )
# model
A = chkpt["""models"""][0]
A = model.state_dict()
# rename keys to start with 'model.'
A = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
A = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(UpperCAmelCase , UpperCAmelCase )
A = FSMTConfig.from_pretrained(UpperCAmelCase )
A = FSMTForConditionalGeneration(UpperCAmelCase )
# check that it loads ok
model_new.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
# save
A = os.path.join(UpperCAmelCase , UpperCAmelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(UpperCAmelCase , UpperCAmelCase )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowerCamelCase : Any = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
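# Standalone demo of the rewrite_dict_keys transformation above: fairseq
# marks subword continuations with a trailing "@@"; the converter strips that
# marker and tags word-final tokens with "</w>" instead (special tokens are
# restored separately). The toy vocab below is made up.
import re

def rewrite_keys(d):
    return {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }

print(rewrite_keys({"low@@": 0, "er": 1}))  # {'low': 0, 'er</w>': 1}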
| 258 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class __UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__lowerCAmelCase = None
class __UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self ):
return datasets.DatasetInfo(features=self.config.features )
def _split_generators(self , dl_manager ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
files = data_files
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
splits = []
for split_name, files in data_files.items():
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
return splits
def _cast_table(self , pa_table : pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
def _generate_tables(self , files ):
for i, file in enumerate(itertools.chain.from_iterable(files ) ):
with open(file , """rb""" ) as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
yield i, self._cast_table(pa_table )
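# Minimal standalone demo of the core conversion in _generate_tables above:
# a pandas DataFrame converts directly into a pyarrow Table (toy data).
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
table = pa.Table.from_pandas(df)
print(table.schema)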
| 258 | 1 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Sequentially scan array[left : right + 1] for target; return its index or -1."""
    for i in range(left, right + 1):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted array; return target's index or -1."""
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # The remaining window is small; finish with a linear scan.
            return lin_search(left, right, array, target)
        # Probe points at one third and two thirds of the current window,
        # both guaranteed to stay inside [left, right].
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on array[left : right + 1]; return target's index or -1."""
    if left > right:
        return -1
    if right - left < precision:
        return lin_search(left, right, array, target)
    one_third = left + (right - left) // 3
    two_third = right - (right - left) // 3
    if array[one_third] == target:
        return one_third
    if array[two_third] == target:
        return two_third
    if target < array[one_third]:
        return rec_ternary_search(left, one_third - 1, array, target)
    if array[two_third] < target:
        return rec_ternary_search(two_third + 1, right, array, target)
    return rec_ternary_search(one_third + 1, two_third - 1, array, target)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at position: {result1}")
        print(f"Recursive search: {target} found at position: {result2}")
    else:
        print("Not found")
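# Standalone sanity check (no user input needed): both variants should agree
# on a sorted list.
# data = list(range(1000))
# assert ite_ternary_search(data, 421) == 421
# assert rec_ternary_search(0, len(data) - 1, data, 421) == 421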
| 258 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def __a ( UpperCAmelCase = "" , ) ->bool:
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def __a ( UpperCAmelCase = "" ) ->bool:
"""simple docstring"""
if len(UpperCAmelCase ) == 0:
return True
A = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
A = {}
for character in lower_case_input_str:
A = character_freq_dict.get(UpperCAmelCase , 0 ) + 1
A = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __a ( UpperCAmelCase = "" ) ->None:
"""simple docstring"""
print("""\nFor string = """ , UpperCAmelCase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(UpperCAmelCase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(UpperCAmelCase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
_lowerCamelCase : Any = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
_lowerCamelCase : Any = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 258 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["""input_ids""", """attention_mask"""]
slow_tokenizer_class = None
def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ):
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
pre_tok_state["""add_prefix_space"""] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
def _batch_encode_plus(self , *args , **kwargs ):
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*args , **kwargs )
def _encode_plus(self , *args , **kwargs ):
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._encode_plus(*args , **kwargs )
def save_vocabulary(self , save_directory : str , filename_prefix : Optional[str] = None ):
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def _build_conversation_input_ids(self , conversation : "Conversation" ):
input_ids = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
if len(input_ids ) > self.model_max_length:
input_ids = input_ids[-self.model_max_length :]
return input_ids
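# Standalone sketch of the conversation flattening done in
# _build_conversation_input_ids above: every turn is encoded and terminated
# with EOS, and only the trailing model_max_length tokens are kept. The
# helper and the toy character-level "encoder" below are illustrative.
def flatten_conversation(turns, encode, eos_id, max_len):
    ids = []
    for text in turns:
        ids.extend(encode(text) + [eos_id])
    return ids[-max_len:] if len(ids) > max_len else ids

print(flatten_conversation(["hi", "yo"], lambda t: [ord(c) for c in t], 2, 100))
# [104, 105, 2, 121, 111, 2]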
| 258 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
model_type = '''encodec'''
def __init__(self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=2_4000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
self.target_bandwidths = target_bandwidths
self.sampling_rate = sampling_rate
self.audio_channels = audio_channels
self.normalize = normalize
self.chunk_length_s = chunk_length_s
self.overlap = overlap
self.hidden_size = hidden_size
self.num_filters = num_filters
self.num_residual_layers = num_residual_layers
self.upsampling_ratios = upsampling_ratios
self.norm_type = norm_type
self.kernel_size = kernel_size
self.last_kernel_size = last_kernel_size
self.residual_kernel_size = residual_kernel_size
self.dilation_growth_rate = dilation_growth_rate
self.use_causal_conv = use_causal_conv
self.pad_mode = pad_mode
self.compress = compress
self.num_lstm_layers = num_lstm_layers
self.trim_right_ratio = trim_right_ratio
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_lowerCAmelCase )
@property
def chunk_length(self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def chunk_stride(self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def frame_rate(self ):
hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def num_quantizers(self ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
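# Worked check (standalone arithmetic) of the derived properties above with
# the 24 kHz defaults (upsampling_ratios=[8, 5, 4, 2], sampling_rate=24000,
# maximum target bandwidth 24.0 kbps):
import math
import numpy as np

hop_length = int(np.prod([8, 5, 4, 2]))                 # 320 samples per frame
frame_rate = math.ceil(24000 / hop_length)              # 75 frames per second
num_quantizers = int(1000 * 24.0 // (frame_rate * 10))  # 32 codebooks
print(hop_length, frame_rate, num_quantizers)           # 320 75 32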
| 258 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_lowerCamelCase : Any = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_lowerCamelCase : List[Any] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_lowerCamelCase : Dict = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
"""Compute per-class intersection/union areas for one prediction/ground-truth pair."""
if label_map is not None:
for old_id, new_id in label_map.items():
label[label == old_id] = new_id
# turn into Numpy arrays
pred_label = np.array(pred_label )
label = np.array(label )
if reduce_labels:
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
mask = np.not_equal(label , ignore_index )
pred_label = pred_label[mask]
label = np.array(label )[mask]
intersect = pred_label[pred_label == label]
area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
"""Accumulate the per-image area terms over a whole dataset."""
total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
for result, gt_seg_map in zip(results , gt_seg_maps ):
area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
"""Compute mean IoU, mean accuracy and overall accuracy over a dataset."""
total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
# compute metrics
metrics = {}
all_acc = total_area_intersect.sum() / total_area_label.sum()
iou = total_area_intersect / total_area_union
acc = total_area_intersect / total_area_label
metrics["""mean_iou"""] = np.nanmean(iou )
metrics["""mean_accuracy"""] = np.nanmean(acc )
metrics["""overall_accuracy"""] = all_acc
metrics["""per_category_iou"""] = iou
metrics["""per_category_accuracy"""] = acc
if nan_to_num is not None:
metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def A (self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
def A (self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : bool , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[Dict[int, int]] = None , _lowerCAmelCase : bool = False , ):
A = mean_iou(
results=_lowerCAmelCase , gt_seg_maps=_lowerCAmelCase , num_labels=_lowerCAmelCase , ignore_index=_lowerCAmelCase , nan_to_num=_lowerCAmelCase , label_map=_lowerCAmelCase , reduce_labels=_lowerCAmelCase , )
return iou_result
| 258 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_lowerCamelCase : Dict = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
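# A minimal sketch of the lazy-module pattern wired up above: heavy imports
# run only on first attribute access, via the PEP 562 module-level
# __getattr__ hook. This is a simplified stand-in, not the actual
# transformers._LazyModule implementation.
import importlib

_IMPORT_STRUCTURE_SKETCH = {
    "configuration_audio_spectrogram_transformer": ["ASTConfig"],
    "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"],
}

def __getattr__(name):  # called only when `name` is not found the normal way
    for submodule, exported in _IMPORT_STRUCTURE_SKETCH.items():
        if name in exported:
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")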
| 258 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Union[str, Any]=7 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : str=True , _lowerCAmelCase : int=True , _lowerCAmelCase : List[str]=99 , _lowerCAmelCase : str=36 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : Dict=37 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Optional[Any]=512 , _lowerCAmelCase : Any=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : str=6 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : int=None , _lowerCAmelCase : Union[str, Any]=1000 , ):
A = parent
A = batch_size
A = num_channels
A = image_size
A = patch_size
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = coordinate_size
A = shape_size
A = num_labels
A = num_choices
A = scope
A = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
A = text_seq_length
A = (image_size // patch_size) ** 2 + 1
A = self.text_seq_length + self.image_seq_length
def A (self : int ):
A = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
A = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
A = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A = bbox[i, j, 3]
A = bbox[i, j, 1]
A = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
A = bbox[i, j, 2]
A = bbox[i, j, 0]
A = tmp_coordinate
A = tf.constant(_lowerCAmelCase )
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.text_seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
A = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def A (self : str , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
A = TFLayoutLMvaModel(config=_lowerCAmelCase )
# text + image
A = model(_lowerCAmelCase , pixel_values=_lowerCAmelCase , training=_lowerCAmelCase )
A = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , training=_lowerCAmelCase , )
A = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
A = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
A = model({"""pixel_values""": pixel_values} , training=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def A (self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
A = self.num_labels
A = TFLayoutLMvaForSequenceClassification(config=_lowerCAmelCase )
A = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A (self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
A = self.num_labels
A = TFLayoutLMvaForTokenClassification(config=_lowerCAmelCase )
A = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def A (self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] ):
A = 2
A = TFLayoutLMvaForQuestionAnswering(config=_lowerCAmelCase )
A = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , training=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A (self : Tuple ):
A = self.prepare_config_and_inputs()
((A) , (A) , (A) , (A) , (A) , (A) , (A) , (A)) = config_and_inputs
A = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
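# The nested loops in prepare_config_and_inputs above make every random box
# legal (x0 <= x1 and y0 <= y1). A vectorized NumPy sketch of the same fix-up;
# the helper name is mine and not part of this test file.
def legalize_bboxes(bbox):
    import numpy as np

    bbox = np.array(bbox)  # shape (batch, seq_len, 4), boxes as (x0, y0, x1, y1)
    x_lo = np.minimum(bbox[..., 0], bbox[..., 2])
    x_hi = np.maximum(bbox[..., 0], bbox[..., 2])
    y_lo = np.minimum(bbox[..., 1], bbox[..., 3])
    y_hi = np.maximum(bbox[..., 1], bbox[..., 3])
    return np.stack([x_lo, y_lo, x_hi, y_hi], axis=-1)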
@require_tf
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
__lowerCAmelCase = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
return True
def A (self : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple=False ):
A = copy.deepcopy(_lowerCAmelCase )
if model_class in get_values(_lowerCAmelCase ):
A = {
k: tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_lowerCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
A = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowerCAmelCase ):
A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowerCAmelCase ):
A = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowerCAmelCase ):
A = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def A (self : int ):
A = TFLayoutLMvaModelTester(self )
A = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def A (self : str ):
self.config_tester.run_common_tests()
def A (self : Dict ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
if getattr(_lowerCAmelCase , """hf_compute_loss""" , _lowerCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
A = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
A = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_lowerCAmelCase )[0]
]
A = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
A = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
A = prepared_for_class.pop("""input_ids""" )
A = model(_lowerCAmelCase , **_lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
A = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
A = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
A = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
A = -100
A = tf.convert_to_tensor(_lowerCAmelCase )
A = model(_lowerCAmelCase , **_lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
A = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
A = model(_lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
A = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
# Get keys that were added with the _prepare_for_class function
A = prepared_for_class.keys() - inputs_dict.keys()
A = inspect.signature(model.call ).parameters
A = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
A = {0: """input_ids"""}
for label_key in label_keys:
A = signature_names.index(_lowerCAmelCase )
A = label_key
A = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
A = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
A = prepared_for_class[value]
A = tuple(_lowerCAmelCase )
# Send to model
A = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def A (self : Dict ):
A , A , A , A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A (self : str ):
A , A , A , A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A = type
self.model_tester.create_and_check_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A (self : Optional[Any] ):
A , A , A , A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A (self : List[str] ):
A , A , A , A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A (self : Dict ):
A , A , A , A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@slow
def A (self : int ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFLayoutLMvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def __a ( ) ->Any:
"""simple docstring"""
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A (self : Tuple ):
return LayoutLMvaImageProcessor(apply_ocr=_lowerCAmelCase ) if is_vision_available() else None
@slow
def A (self : Optional[int] ):
A = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" ).pixel_values
A = tf.constant([[1, 2]] )
A = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
A = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
A = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase )
A = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 258 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : str ):
A = """ylacombe/bark-small"""
A = tempfile.mkdtemp()
A = """en_speaker_1"""
A = """This is a test string"""
A = """speaker_embeddings_path.json"""
A = """speaker_embeddings"""
def A (self : Optional[Any] , **_lowerCAmelCase : Any ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase )
def A (self : Dict ):
shutil.rmtree(self.tmpdirname )
def A (self : Optional[Any] ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
A = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def A (self : int ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def A (self : Union[str, Any] ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
A = 35
A = 2
A = 8
A = {
"""semantic_prompt""": np.ones(_lowerCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
A = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_lowerCAmelCase , **_lowerCAmelCase )
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
A = processor(text=self.input_string , voice_preset=self.voice_preset )
def A (self : str ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
A = processor(text=self.input_string )
A = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
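# The voice-preset test above leans on np.savez / np.load to round-trip a dict
# of arrays. A minimal standalone sketch of that pattern (the helper name is
# mine; numpy is already imported as np at the top of this file):
def roundtrip_preset(path, preset):
    np.savez(path, **preset)  # one named array per dict key; path should end in ".npz"
    with np.load(path) as archive:
        return {key: archive[key] for key in archive.files}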
| 258 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Union[str, Any] = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
_lowerCamelCase : List[Any] = 6_3_7_8_1_3_7.0
_lowerCamelCase : List[Any] = 6_3_5_6_7_5_2.3_1_4_2_4_5
_lowerCamelCase : Optional[int] = 637_8137
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
A = (AXIS_A - AXIS_B) / AXIS_A
A = atan((1 - flattening) * tan(radians(UpperCAmelCase ) ) )
A = atan((1 - flattening) * tan(radians(UpperCAmelCase ) ) )
A = radians(UpperCAmelCase )
A = radians(UpperCAmelCase )
# Equation
A = sin((phi_a - phi_a) / 2 )
A = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
A = sqrt(sin_sq_phi + (cos(UpperCAmelCase ) * cos(UpperCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(UpperCAmelCase )
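# For reference, a readable haversine sketch on a plain sphere, without the
# reduced-latitude flattening correction applied above; names are mine.
# d = 2 * R * asin(sqrt(sin^2(dphi / 2) + cos(phi1) * cos(phi2) * sin^2(dlambda / 2)))
def haversine_sketch(lat1, lon1, lat2, lon2, radius=6_378_137.0):
    phi_1, phi_2 = radians(lat1), radians(lat2)
    d_phi = phi_2 - phi_1
    d_lambda = radians(lon2) - radians(lon1)
    h = sin(d_phi / 2) ** 2 + cos(phi_1) * cos(phi_2) * sin(d_lambda / 2) ** 2
    return 2 * radius * asin(sqrt(h))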
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
try:
A = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A = default
else:
# KEY is set, convert it to True or False.
try:
A = strtobool(UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
_lowerCamelCase : Optional[int] = parse_flag_from_env('RUN_SLOW', default=False)
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
return unittest.skip("""Test was skipped""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->int:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Any:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(UpperCAmelCase )
def __a ( UpperCAmelCase=None , UpperCAmelCase=None ) ->Optional[int]:
"""simple docstring"""
if test_case is None:
return partial(UpperCAmelCase , version=UpperCAmelCase )
return unittest.skipUnless(is_torch_version(""">=""" , UpperCAmelCase ) , f"""test requires torch version >= {version}""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(UpperCAmelCase )
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(UpperCAmelCase )
_lowerCamelCase : List[Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(UpperCAmelCase )
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
@classmethod
def A (cls : Any ):
A = tempfile.mkdtemp()
@classmethod
def A (cls : Dict ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def A (self : Union[str, Any] ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_lowerCAmelCase )
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : List[Any] ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : str , _lowerCAmelCase : Union[mock.Mock, List[mock.Mock]] ):
A = mocks if isinstance(_lowerCAmelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = AcceleratorState()
A = tensor[None].clone().to(state.device )
A = gather(UpperCAmelCase ).cpu()
A = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , UpperCAmelCase ):
return False
return True
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ):
A = returncode
A = stdout
A = stderr
async def __a ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
while True:
A = await stream.readline()
if line:
callback(UpperCAmelCase )
else:
break
async def __a ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False ) ->_RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(UpperCAmelCase ) )
A = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A = []
A = []
def tee(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="" ):
A = line.decode("""utf-8""" ).rstrip()
sink.append(UpperCAmelCase )
if not quiet:
print(UpperCAmelCase , UpperCAmelCase , file=UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda UpperCAmelCase : tee(UpperCAmelCase , UpperCAmelCase , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda UpperCAmelCase : tee(UpperCAmelCase , UpperCAmelCase , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=UpperCAmelCase , )
return _RunOutput(await p.wait() , UpperCAmelCase , UpperCAmelCase )
def __a ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=180 , UpperCAmelCase=False , UpperCAmelCase=True ) ->_RunOutput:
"""simple docstring"""
A = asyncio.get_event_loop()
A = loop.run_until_complete(
_stream_subprocess(UpperCAmelCase , env=UpperCAmelCase , stdin=UpperCAmelCase , timeout=UpperCAmelCase , quiet=UpperCAmelCase , echo=UpperCAmelCase ) )
A = """ """.join(UpperCAmelCase )
if result.returncode > 0:
A = """\n""".join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
return result
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
pass
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
try:
A = subprocess.check_output(UpperCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(UpperCAmelCase , """decode""" ):
A = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"""Command `{" ".join(UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 258 |
'''simple docstring'''
import os
import sys
import unittest
_lowerCamelCase : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCamelCase : str = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
_lowerCamelCase : Tuple = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Any ):
A = get_test_to_tester_mapping(_lowerCAmelCase )
A = get_test_to_tester_mapping(_lowerCAmelCase )
A = {"""BertModelTest""": """BertModelTester"""}
A = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def A (self : str ):
A = get_model_to_test_mapping(_lowerCAmelCase )
A = get_model_to_test_mapping(_lowerCAmelCase )
A = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
A = get_model_to_tester_mapping(_lowerCAmelCase )
A = get_model_to_tester_mapping(_lowerCAmelCase )
A = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
| 258 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Tuple , _lowerCAmelCase : CLIPSegForImageSegmentation , _lowerCAmelCase : CLIPSegProcessor , _lowerCAmelCase : AutoencoderKL , _lowerCAmelCase : CLIPTextModel , _lowerCAmelCase : CLIPTokenizer , _lowerCAmelCase : UNetaDConditionModel , _lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowerCAmelCase : StableDiffusionSafetyChecker , _lowerCAmelCase : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
A = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
A = dict(scheduler.config )
A = 1
A = FrozenDict(_lowerCAmelCase )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
A = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
A = dict(scheduler.config )
A = True
A = FrozenDict(_lowerCAmelCase )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=_lowerCAmelCase , segmentation_processor=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , )
def A (self : List[str] , _lowerCAmelCase : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def A (self : Tuple ):
self.enable_attention_slicing(_lowerCAmelCase )
def A (self : Union[str, Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A (self : Optional[Any] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__(self : Optional[Any] , _lowerCAmelCase : Union[str, List[str]] , _lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , _lowerCAmelCase : str , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 50 , _lowerCAmelCase : float = 7.5 , _lowerCAmelCase : Optional[Union[str, List[str]]] = None , _lowerCAmelCase : Optional[int] = 1 , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : Optional[torch.Generator] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : List[str] , ):
A = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
A = self.segmentation_model(**_lowerCAmelCase )
A = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
A = self.numpy_to_pil(_lowerCAmelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
A = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , height=_lowerCAmelCase , width=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , negative_prompt=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , latents=_lowerCAmelCase , output_type=_lowerCAmelCase , return_dict=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=_lowerCAmelCase , )
| 258 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if len(UpperCAmelCase ) <= 1 or n <= 1:
return
insert_next(UpperCAmelCase , n - 1 )
rec_insertion_sort(UpperCAmelCase , n - 1 )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if index >= len(UpperCAmelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
A , A = (
collection[index],
collection[index - 1],
)
insert_next(UpperCAmelCase , index + 1 )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = input('Enter integers separated by spaces: ')
_lowerCamelCase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
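# For comparison, a minimal iterative insertion sort doing the same work as
# the recursive pair above; the function name and variable names are mine.
def insertion_sort_sketch(collection):
    for index in range(1, len(collection)):
        value = collection[index]
        position = index
        # shift larger elements one slot to the right, then drop `value` in
        while position > 0 and collection[position - 1] > value:
            collection[position] = collection[position - 1]
            position -= 1
        collection[position] = value
    return collection

# insertion_sort_sketch([5, 2, 4, 1]) -> [1, 2, 4, 5]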
| 258 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowerCamelCase : int = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_lowerCamelCase : Tuple = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_lowerCamelCase : Optional[int] = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
return float((preds == labels).mean() )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="binary" ) ->Any:
"""simple docstring"""
A = simple_accuracy(UpperCAmelCase , UpperCAmelCase )
A = float(fa_score(y_true=UpperCAmelCase , y_pred=UpperCAmelCase , average=UpperCAmelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = {}
for id_pred, label in zip(UpperCAmelCase , UpperCAmelCase ):
A = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
A = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
A = [(pred, label)]
A , A = [], []
for question, preds_labels in question_map.items():
A , A = zip(*UpperCAmelCase )
A = fa_score(y_true=UpperCAmelCase , y_pred=UpperCAmelCase , average="""macro""" )
fas.append(UpperCAmelCase )
A = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCAmelCase ) )
ems.append(UpperCAmelCase )
A = float(sum(UpperCAmelCase ) / len(UpperCAmelCase ) )
A = sum(UpperCAmelCase ) / len(UpperCAmelCase )
A = float(fa_score(y_true=UpperCAmelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def A (self : List[Any] ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def A (self : str ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def A (self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowerCAmelCase , _lowerCAmelCase )}
elif self.config_name == "cb":
return acc_and_fa(_lowerCAmelCase , _lowerCAmelCase , fa_avg="""macro""" )
elif self.config_name == "record":
A = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
A = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowerCAmelCase , _lowerCAmelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowerCAmelCase , _lowerCAmelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 258 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __a ( UpperCAmelCase ) ->Tuple: # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __a ( UpperCAmelCase ) ->int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def A (self : Tuple ):
A = {}
A = []
A = 1
A = [1, 2]
A = {"""a""": 1, """b""": 2}
A = {"""a""": [1, 2], """b""": [3, 4]}
A = {"""a""": {"""1""": 1}, """b""": 2}
A = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
A = {}
A = []
A = 2
A = [2, 3]
A = {"""a""": 2, """b""": 3}
A = {"""a""": [2, 3], """b""": [4, 5]}
A = {"""a""": {"""1""": 2}, """b""": 3}
A = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A = 2
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
A = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
A = {"""a""": 2, """b""": 0, """c""": 2}
A = {
"""a""": np.eye(2 ).astype(_lowerCAmelCase ),
"""b""": np.zeros(3 ).astype(_lowerCAmelCase ),
"""c""": np.ones(2 ).astype(_lowerCAmelCase ),
}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
            map_nested(lambda x: x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )
def A (self : List[Any] ):
A = {"""a""": 1, """b""": 2}
A = {"""a""": 3, """b""": 4}
A = {"""a""": 5, """b""": 6}
A = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
        class Foo:
            '''simple docstring'''
            my_attr = '''bar'''
        foo = Foo()
        self.assertEqual(foo.my_attr , """bar""" )
        with temporary_assignment(foo , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length , num_proc , expected_num_proc ):
    """simple docstring"""
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        data_struct = {f"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x: x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@require_tf
def A (self : Dict ):
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            random_input = tf.random.uniform((1, 3) )
            return model(random_input ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def A (self : Tuple ):
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def A (self : str ):
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data(input_data ):
    """simple docstring"""
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data , expected_output ):
    """simple docstring"""
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    """simple docstring"""
    input = A(x=1 , y="""foobar""" )
    expected_output = {"""x""": 1, """y""": """foobar"""}
    assert asdict(input ) == expected_output
    input = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
    expected_output = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="""foo""" )] )
def _split_text(text ):
    """simple docstring"""
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content ):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered():
    """simple docstring"""
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(out ) == 4
| 258 | 1 |
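For orientation, a minimal usage sketch of `map_nested` (not part of the test file above; the helper name is illustrative). It applies a function to every leaf of an arbitrarily nested dict/list structure, and `num_proc` moves the work onto a multiprocessing pool once the structure is long enough:

from datasets.utils.py_utils import map_nested

def add_ten(i):  # module-level so it stays picklable when num_proc > 1 is used
    return i + 10

nested = {"a": [1, 2], "b": {"c": 3}}
print(map_nested(add_ten, nested))  # {'a': [11, 12], 'b': {'c': 13}}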
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Any = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258 |
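The boilerplate above is the standard transformers lazy-import pattern: the cheap `_import_structure` dict is built eagerly, while the torch/tf/flax submodules are imported only when one of their symbols is first accessed. A stripped-down sketch of the mechanism (a hypothetical class, not the real `_LazyModule`):

import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = importlib.import_module(self._attr_to_submodule[attr])
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache: the heavy import runs only once
        return value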
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    """simple docstring"""
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 258 | 1 |
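The tuple-counting idiom in `or_gate` extends naturally to the other basic gates; a companion sketch (these helpers are not part of the snippet above):

def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)

def nor_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) == 0)

# full truth tables
assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]
assert [nor_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 0, 0, 0]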
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Union[str, Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Any ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
A = {}
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : int ):
A = super().add_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def A (self : Tuple , _lowerCAmelCase : Dict , *_lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=1 , **_lowerCAmelCase : List[str] ):
A = []
if num_vec_per_token == 1:
self.try_adding_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
output.append(_lowerCAmelCase )
else:
A = []
for i in range(_lowerCAmelCase ):
A = placeholder_token + F"""_{i}"""
self.try_adding_tokens(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
output.append(_lowerCAmelCase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
A = output
def A (self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Any=1.0 ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A = []
for i in range(len(_lowerCAmelCase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_lowerCAmelCase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A = self.token_map[placeholder_token]
A = tokens[: 1 + int(len(_lowerCAmelCase ) * prop_tokens_to_load )]
if vector_shuffle:
A = copy.copy(_lowerCAmelCase )
random.shuffle(_lowerCAmelCase )
A = text.replace(_lowerCAmelCase , """ """.join(_lowerCAmelCase ) )
return text
def __call__(self : Tuple , _lowerCAmelCase : Optional[Any] , *_lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=1.0 , **_lowerCAmelCase : Dict ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
_lowerCAmelCase , vector_shuffle=_lowerCAmelCase , prop_tokens_to_load=_lowerCAmelCase ) , *_lowerCAmelCase , **_lowerCAmelCase , )
def A (self : Optional[int] , _lowerCAmelCase : Optional[Any] , *_lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Any=1.0 , **_lowerCAmelCase : Optional[int] ):
return super().encode(
self.replace_placeholder_tokens_in_text(
_lowerCAmelCase , vector_shuffle=_lowerCAmelCase , prop_tokens_to_load=_lowerCAmelCase ) , *_lowerCAmelCase , **_lowerCAmelCase , )
| 258 |
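In isolation, the core trick of the tokenizer subclass above: a textual-inversion placeholder is fanned out into `num_vec_per_token` sub-tokens before encoding, so one concept can own several embedding vectors. A self-contained sketch (the function name is hypothetical):

def expand_placeholder(text: str, placeholder: str, num_vec_per_token: int) -> str:
    if num_vec_per_token == 1:
        return text
    expanded = " ".join(f"{placeholder}_{i}" for i in range(num_vec_per_token))
    return text.replace(placeholder, expanded)

assert expand_placeholder("a photo of <cat>", "<cat>", 3) == "a photo of <cat>_0 <cat>_1 <cat>_2"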
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : str , _lowerCAmelCase : PriorTransformer , _lowerCAmelCase : CLIPVisionModel , _lowerCAmelCase : CLIPImageProcessor , _lowerCAmelCase : HeunDiscreteScheduler , _lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def A (self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
if latents is None:
A = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
A = latents.to(_lowerCAmelCase )
A = latents * scheduler.init_noise_sigma
return latents
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A = torch.device(F"""cuda:{gpu_id}""" )
A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def A (self : Optional[Any] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def A (self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
A = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
A = self.image_processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
A = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
A = self.image_encoder(_lowerCAmelCase )["""last_hidden_state"""]
A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
A = torch.zeros_like(_lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__(self : List[Any] , _lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 25 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
A = 1
elif isinstance(_lowerCAmelCase , torch.Tensor ):
A = image.shape[0]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A = len(_lowerCAmelCase )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCAmelCase )}""" )
A = self._execution_device
A = batch_size * num_images_per_prompt
A = guidance_scale > 1.0
A = self._encode_image(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# prior
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
A = self.scheduler.timesteps
A = self.prior.config.num_embeddings
A = self.prior.config.embedding_dim
A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A = latents.reshape(latents.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
A = self.prior(
_lowerCAmelCase , timestep=_lowerCAmelCase , proj_embedding=_lowerCAmelCase , ).predicted_image_embedding
# remove the variance
A , A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
A , A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A = self.scheduler.step(
_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCAmelCase )
A = []
for i, latent in enumerate(_lowerCAmelCase ):
print()
A = self.renderer.decode(
latent[None, :] , _lowerCAmelCase , size=_lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCAmelCase )
A = torch.stack(_lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
A = images.cpu().numpy()
if output_type == "pil":
A = [self.numpy_to_pil(_lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCAmelCase )
| 258 | 1 |
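The guidance step buried in the denoising loop above is worth isolating: the batch is doubled into [unconditional, conditional] halves, and the final prediction extrapolates from the unconditional half toward the conditional one. A minimal sketch:

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    uncond, cond = noise_pred.chunk(2)  # halves stacked by the doubled batch
    return uncond + guidance_scale * (cond - uncond)

pred = torch.randn(2, 16, 32)  # toy stand-in for the prior's output
assert apply_cfg(pred, guidance_scale=4.0).shape == (1, 16, 32)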
'''simple docstring'''
from math import pow, sqrt
def validate(*values: float) -> bool:
    """simple docstring"""
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """simple docstring"""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError("""Input Error: Molar mass values must be greater than 0.""" )
    )
def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """simple docstring"""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""" )
    )
def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """simple docstring"""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""" )
    )
def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    """simple docstring"""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""" )
    )
def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    """simple docstring"""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""" )
    )
| 258 |
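A worked number for Graham's law as implemented above, rate_1 / rate_2 = sqrt(M_2 / M_1), using approximate molar masses for hydrogen and oxygen:

from math import sqrt

m_h2, m_o2 = 2.016, 31.998          # g/mol, approximate
ratio = round(sqrt(m_o2 / m_h2), 6)
print(ratio)                        # ~3.984: H2 effuses about four times faster than O2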
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Union[str, Any] ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
A = model
A = kwargs.get("""model_save_dir""" , _lowerCAmelCase )
A = kwargs.get("""latest_model_name""" , _lowerCAmelCase )
def __call__(self : Tuple , **_lowerCAmelCase : Optional[Any] ):
A = {k: np.array(_lowerCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_lowerCAmelCase , _lowerCAmelCase )
@staticmethod
def A (_lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
A = """CPUExecutionProvider"""
return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : List[str] ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A = self.model_save_dir.joinpath(self.latest_model_name )
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A = self.model_save_dir.joinpath(_lowerCAmelCase )
if src_path.exists():
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
def A (self : Tuple , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : str , ):
if os.path.isfile(_lowerCAmelCase ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# saving model weights/files
self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Union[bool, str, None]] = None , _lowerCAmelCase : Optional[Union[str, None]] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional["ort.SessionOptions"] = None , **_lowerCAmelCase : Optional[int] , ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowerCAmelCase ):
A = OnnxRuntimeModel.load_model(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
A = Path(_lowerCAmelCase )
# load model from hub
else:
# download model
A = hf_hub_download(
repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , )
A = Path(_lowerCAmelCase ).parent
A = Path(_lowerCAmelCase ).name
A = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
return cls(model=_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : str , ):
A = None
if len(str(_lowerCAmelCase ).split("""@""" ) ) == 2:
A , A = model_id.split("""@""" )
return cls._from_pretrained(
model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
| 258 | 1 |
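One easy-to-miss detail of the wrapper above: `__call__` coerces every keyword argument to a numpy array before calling `session.run`, since ONNX Runtime expects ndarrays rather than Python lists or scalars for typed graph inputs. The same prep step in miniature:

import numpy as np

def prepare_onnx_inputs(**kwargs):
    return {k: np.array(v) for k, v in kwargs.items()}

feeds = prepare_onnx_inputs(sample=[[0.0, 1.0]], timestep=5)
assert feeds["sample"].shape == (1, 2)
assert feeds["timestep"].shape == ()  # scalars become 0-d arrays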
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''donut-swin'''
__snake_case = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : List[str] , __UpperCAmelCase : Optional[Any]=224 , __UpperCAmelCase : Optional[int]=4 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Any=96 , __UpperCAmelCase : Optional[int]=[2, 2, 6, 2] , __UpperCAmelCase : List[Any]=[3, 6, 12, 24] , __UpperCAmelCase : Optional[Any]=7 , __UpperCAmelCase : List[Any]=4.0 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : int=1e-5 , **__UpperCAmelCase : Tuple , ) ->int:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
a = image_size
a = patch_size
a = num_channels
a = embed_dim
a = depths
a = len(__UpperCAmelCase )
a = num_heads
a = window_size
a = mlp_ratio
a = qkv_bias
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = drop_path_rate
a = hidden_act
a = use_absolute_embeddings
a = layer_norm_eps
a = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
| 0 |
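The last line of the config above encodes a Swin invariant: channel width doubles at each stage, so the channel dimension after the final stage is `embed_dim * 2 ** (num_stages - 1)`. A quick check with the defaults:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768  # what VisionEncoderDecoderModel sees as hidden_size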
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A (self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
A = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
A = VideoClassificationPipeline(model=_lowerCAmelCase , image_processor=_lowerCAmelCase , top_k=2 )
A = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def A (self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
for example in examples:
A = video_classifier(_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
] , )
@require_torch
def A (self : Optional[Any] ):
A = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
A = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
A = pipeline(
"""video-classification""" , model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , frame_sampling_rate=4 )
A = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
A = video_classifier(_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
A = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def A (self : List[Any] ):
pass
| 258 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : Optional[int] , *__a : Optional[Any] , **__a : Dict ):
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 1 |
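The shim above is the usual deprecation pattern: subclass the replacement, warn once in `__init__`, and forward everything else. The same pattern in miniature (class names are hypothetical):

import warnings

class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)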
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """simple docstring"""
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """simple docstring"""
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_rec != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print('Not found')
| 258 | 0 |
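A quick sanity run against the search functions above (as reconstructed): both variants require a sorted list, return an index of the target or -1, and fall back to a linear scan once the window is narrower than `precision`:

collection = sorted(range(0, 200, 3))             # strictly increasing
assert collection[ite_ternary_search(collection, 99)] == 99
assert collection[rec_ternary_search(0, len(collection) - 1, collection, 99)] == 99
assert ite_ternary_search(collection, 100) == -1  # 100 is not a multiple of 3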
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCamelCase : int = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> int:
"""simple docstring"""
if "." in tensor_name:
lowercase__ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowercase__ = getattr(A , A )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
lowercase__ = new_module
lowercase__ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}." )
lowercase__ = tensor_name in module._buffers
lowercase__ = getattr(A , A )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
lowercase__ = False
lowercase__ = False
if is_buffer or not is_bitsandbytes_available():
lowercase__ = False
lowercase__ = False
else:
        lowercase__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        lowercase__ = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
if is_abit or is_abit:
lowercase__ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowercase__ = old_value.to(A )
elif isinstance(A , torch.Tensor ):
lowercase__ = value.to('''cpu''' )
                if value.dtype == torch.int8:
lowercase__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
lowercase__ = torch.tensor(A , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , A ) and fpaa_statistics is None:
lowercase__ = new_value.T
lowercase__ = old_value.__dict__
if is_abit:
                lowercase__ = bnb.nn.Int8Params(A , requires_grad=A , **A ).to(A )
elif is_abit:
                lowercase__ = bnb.nn.Params4bit(A , requires_grad=A , **A ).to(A )
lowercase__ = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(A ) )
else:
if value is None:
lowercase__ = old_value.to(A )
elif isinstance(A , torch.Tensor ):
lowercase__ = value.to(A )
else:
lowercase__ = torch.tensor(A , device=A )
if is_buffer:
lowercase__ = new_value
else:
lowercase__ = nn.Parameter(A , requires_grad=old_value.requires_grad )
lowercase__ = new_value
def _SCREAMING_SNAKE_CASE (A , A=None , A=None , A=None , A=False ) -> Union[str, Any]:
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
lowercase__ = []
current_key_name.append(A )
if (isinstance(A , nn.Linear ) or isinstance(A , A )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(A ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(A , A ):
lowercase__ ,lowercase__ = module.weight.shape
else:
lowercase__ = module.in_features
lowercase__ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        lowercase__ = bnb.nn.Linear8bitLt(
A , A , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
lowercase__ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
                            lowercase__ = bnb.nn.Linear4bit(
A , A , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
lowercase__ = True
# Store the module class in case we need to transpose the weight later
lowercase__ = type(A )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(A )
if len(list(module.children() ) ) > 0:
lowercase__ ,lowercase__ = _replace_with_bnb_linear(
A , A , A , A , has_been_replaced=A , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _SCREAMING_SNAKE_CASE (A , A=None , A=None , A=None ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
lowercase__ ,lowercase__ = _replace_with_bnb_linear(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _SCREAMING_SNAKE_CASE (*A , **A ) -> List[str]:
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , A , )
return replace_with_bnb_linear(*A , **A )
def _SCREAMING_SNAKE_CASE (*A , **A ) -> Tuple:
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , A , )
return set_module_quantized_tensor_to_device(*A , **A )
def _SCREAMING_SNAKE_CASE (A ) -> int:
"""simple docstring"""
lowercase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowercase__ = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
lowercase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase__ = sum(A , [] )
lowercase__ = len(A ) > 0
# Check if it is a base model
lowercase__ = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase__ = list(model.named_children() )
lowercase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase__ = set(A ) - set(A )
lowercase__ = list(set(A ) ) + list(A )
# remove ".weight" from the keys
lowercase__ = ['''.weight''', '''.bias''']
lowercase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase__ = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
| 2 |
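The heart of the module above is the recursive walk that swaps `nn.Linear` children for quantized replacements while skipping modules like `lm_head`. A minimal sketch of that pattern that runs without bitsandbytes (the quantized class is faked):

import torch.nn as nn

class FakeQuantLinear(nn.Linear):
    pass  # stand-in for bnb.nn.Linear8bitLt / Linear4bit

def replace_linears(model: nn.Module, skip: tuple = ("lm_head",)) -> nn.Module:
    for name, child in model.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            new = FakeQuantLinear(child.in_features, child.out_features, child.bias is not None)
            setattr(model, name, new)  # re-registers the child under the same name
        else:
            replace_linears(child, skip)
    return model

toy = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
replace_linears(toy)
assert all(isinstance(m, FakeQuantLinear) for m in toy if isinstance(m, nn.Linear))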
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = CycleDiffusionPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A (self : int ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A = CLIPTextModel(_lowerCAmelCase )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A (self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=0 ):
A = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
A = image / 2 + 0.5
if str(_lowerCAmelCase ).startswith("""mps""" ):
A = torch.manual_seed(_lowerCAmelCase )
else:
A = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
A = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def A (self : Any ):
A = """cpu""" # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A (self : str ):
A = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowerCAmelCase , """half""" ):
A = module.half()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A (self : Optional[int] ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def A (self : Optional[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def A (self : Dict ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A (self : Optional[Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A (self : Optional[int] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(
            _lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , torch_dtype=torch.float16 , revision="""fp16""" )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 258 | 0 |
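The tests above pin randomness the way most diffusers tests do: a fresh, explicitly seeded `torch.Generator` per invocation. The idiom in isolation:

import torch

generator = torch.Generator(device="cpu").manual_seed(0)
a = torch.rand(2, generator=generator)
generator = torch.Generator(device="cpu").manual_seed(0)
b = torch.rand(2, generator=generator)
assert torch.equal(a, b)  # same seed, same draw -> reproducible pipeline outputs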
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase : str = logging.get_logger(__name__)
lowercase : Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
for attribute in key.split('''.''' ):
A : Dict = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
A : Dict = getattr(snake_case__ , snake_case__ ).shape
else:
A : str = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
A : List[str] = value
elif weight_type == "weight_g":
A : str = value
elif weight_type == "weight_v":
A : Optional[Any] = value
elif weight_type == "bias":
A : List[str] = value
else:
A : Tuple = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Union[str, Any] = []
A : Tuple = fairseq_model.state_dict()
A : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A : Any = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == '''group''' , )
A : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
A : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
A : str = True
if "*" in mapped_key:
A : str = name.split(snake_case__ )[0].split('''.''' )[-2]
A : int = mapped_key.replace('''*''' , snake_case__ )
if "weight_g" in name:
A : List[str] = '''weight_g'''
elif "weight_v" in name:
A : str = '''weight_v'''
elif "weight" in name:
A : List[str] = '''weight'''
elif "bias" in name:
A : Optional[Any] = '''bias'''
else:
A : Optional[Any] = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[int] = full_name.split('''conv_layers.''' )[-1]
A : Dict = name.split('''.''' )
A : Union[str, Any] = int(items[0] )
A : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A : int = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A : Any = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A : Union[str, Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A : Tuple = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=True ):
'''simple docstring'''
if config_path is not None:
A : Tuple = HubertConfig.from_pretrained(snake_case__ )
else:
A : Dict = HubertConfig()
if is_finetuned:
if dict_path:
A : Any = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A : str = target_dict.pad_index
A : Optional[int] = target_dict.bos_index
A : Optional[int] = target_dict.eos_index
A : Optional[int] = len(target_dict.symbols )
A : Union[str, Any] = os.path.join(snake_case__ , '''vocab.json''' )
if not os.path.isdir(snake_case__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , snake_case__ )
A : Optional[int] = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case__ , )
A : Optional[Any] = True if config.feat_extract_norm == '''layer''' else False
A : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
A : Dict = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
A : Union[str, Any] = HubertForCTC(snake_case__ )
else:
A : List[str] = HubertModel(snake_case__ )
if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
recursively_load_weights(snake_case__ , snake_case__ , snake_case__ )
hf_wavavec.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase : Dict = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 |
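The conversion above is driven by the MAPPING table: fairseq keys are renamed to HF keys, with `*` standing in for the layer index recovered from the original name. A minimal sketch of that rename (one mapping entry shown):

from typing import Optional

MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def remap(name: str) -> Optional[str]:
    for key, mapped in MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]  # e.g. "3"
            return mapped.replace("*", layer_index)
    return None

assert remap("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj"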
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
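    # Minimal usage sketch for the pipeline above. The checkpoint name and the
    # image path are illustrative assumptions, not taken from this file:
    #
    #   from transformers import pipeline
    #   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
    #   print(classifier("path/to/image.png", top_k=3))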
| 258 | 0 |
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 4 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """simple docstring"""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
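    # Worked example (approximate textbook values, assumed here rather than
    # taken from the source): water has density ~998 kg/m^3 and bulk modulus
    # ~2.15e9 Pa, which gives a speed of sound of roughly 1467 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))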
| 258 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.", )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.", )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.", )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.", )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """simple docstring"""
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    """simple docstring"""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# disable the safety checker so distilled outputs are not filtered
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 258 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], int, )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`")
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], int, )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower(), )
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower(), )
    distributed_type = _ask_options(
        "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode, )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default", )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ", int, default=1, )
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.")
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, ) | 6 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """simple docstring"""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
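    # The same call works element-wise on higher-dimensional arrays too:
    print(relu(np.array([[-2.5, 3.0], [0.0, -0.1]])))  # --> [[0. 3.] [0. 0.]]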
| 258 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    '''simple docstring'''
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    '''simple docstring'''
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
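    # Worked check: 4150 is one of the numbers being summed, since
    # 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.
    assert digits_fifth_powers_sum(4150) == 4150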
| 7 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return Swinv2Config(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 258 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(f"""isbn/{isbn}"""))
print('''\n'''.join(f"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"""Sorry, there are no results for ISBN: {isbn}.""") | 8 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """simple docstring"""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
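    # Worked check: identical strings keep a perfect score (Jaro similarity of
    # 1.0, so the prefix bonus adds nothing), while 'hello' vs 'world' above
    # shares only 'l' and 'o' and comes out near 0.4667.
    print(jaro_winkler('hello', 'hello'))  # 1.0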
| 258 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
# second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
@property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 9 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 258 | 0 |
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
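    # Cross-check against the worked example in the Project Euler 114 problem
    # statement: a row of length 7 admits exactly 17 arrangements.
    assert solution(7) == 17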
| 10 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    '''simple docstring'''

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 258 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
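# Quick sanity check (the classic 3-4-5 right triangle): both implementations
# agree on the same inputs.
assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0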
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
benchmark()
| 11 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "", ) -> bool:
    """simple docstring"""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """simple docstring"""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
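# Worked examples for the two checkers above: "momo" (all even counts) and
# "malayalam" (exactly one odd count) can be rearranged into palindromes,
# while "abc" cannot.
assert can_string_be_rearranged_as_palindrome_counter("momo")
assert can_string_be_rearranged_as_palindrome("malayalam")
assert not can_string_be_rearranged_as_palindrome("abc")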
def benchmark(input_str: str = "") -> None:
    """simple docstring"""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()", "\tans =", can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =", timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z", ), "seconds", )
    print(
        "> can_string_be_rearranged_as_palindrome()", "\tans =", can_string_be_rearranged_as_palindrome(input_str), "\ttime =", timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z", ), "seconds", )
if __name__ == "__main__":
_lowerCamelCase : Any = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
_lowerCamelCase : Any = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 258 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    '''simple docstring'''
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    '''simple docstring'''
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
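    # Running the example from the comments above. Note the function returns a
    # set, so the element order in the printed cover may vary between runs.
    example_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Matching vertex cover:\n{matching_min_vertex_cover(example_graph)}")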
| 12 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
        super().__init__(**kwargs)
@property
    def chunk_length(self):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride(self):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length )
@property
    def num_quantizers(self):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
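if __name__ == "__main__":
    # Worked check of the derived properties above (a sketch, not part of the
    # library): with the default 24 kHz sampling rate and upsampling ratios
    # [8, 5, 4, 2], hop_length = 8 * 5 * 4 * 2 = 320, so
    # frame_rate = ceil(24000 / 320) = 75 frames per second.
    config = EncodecConfig()
    assert config.frame_rate == 75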
| 258 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowerCAmelCase : str = ["""small""", """medium""", """large"""]
lowerCAmelCase : Dict = """lm_head.decoder.weight"""
lowerCAmelCase : Optional[Any] = """lm_head.weight"""
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: int = torch.load(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = d.pop(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
lowerCAmelCase : List[Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowerCAmelCase : Tuple = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
lowerCAmelCase : List[Any] = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 13 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_lowerCamelCase : Dict = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258 | 0 |
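The DialoGPT conversion row above boils down to renaming one checkpoint key, lm_head.decoder.weight, to the lm_head.weight key transformers expects. A sketch of that step in isolation, assuming the checkpoint is a plain pickled state dict (both paths are placeholders):

import torch

def rename_lm_head(src_path: str, dst_path: str) -> None:
    # load the raw state dict, move the decoder weight to the key
    # transformers expects, and save the result
    state_dict = torch.load(src_path, map_location="cpu")
    state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
    torch.save(state_dict, dst_path)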
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = ''''''
for word_or_phrase in separated:
if not isinstance(lowercase_ , lowercase_ ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
return joined.strip(lowercase_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 14 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : str ):
A = """ylacombe/bark-small"""
A = tempfile.mkdtemp()
A = """en_speaker_1"""
A = """This is a test string"""
A = """speaker_embeddings_path.json"""
A = """speaker_embeddings"""
def A (self : Optional[Any] , **_lowerCAmelCase : Any ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase )
def A (self : Dict ):
shutil.rmtree(self.tmpdirname )
def A (self : Optional[Any] ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
A = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def A (self : int ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def A (self : Union[str, Any] ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
A = 35
A = 2
A = 8
A = {
"""semantic_prompt""": np.ones(_lowerCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
A = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_lowerCAmelCase , **_lowerCAmelCase )
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
A = processor(text=self.input_string , voice_preset=self.voice_preset )
def A (self : str ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
A = processor(text=self.input_string )
A = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 258 | 0 |
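The join row above reimplements string joining with type checking; separator.join(separated) is the idiomatic one-liner, but a readable, runnable version of the snippet, with the renamed identifiers restored as a best guess, is:

def join(separator: str, separated: list) -> str:
    # concatenate the parts with the separator, rejecting non-strings,
    # then strip the trailing separator the loop leaves behind
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)

assert join("", ["a", "b", "c"]) == "abc"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"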
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def UpperCAmelCase ( a_ , a_ , a_=None , a_=None ) -> Any:
"""simple docstring"""
if attention_mask is None:
__A = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = OPTConfig
snake_case_ = {}
snake_case_ = "gelu"
def __init__( self : Tuple ,A : Tuple ,A : Optional[Any]=13 ,A : Union[str, Any]=7 ,A : Tuple=True ,A : Optional[int]=False ,A : List[str]=99 ,A : str=16 ,A : Optional[Any]=2 ,A : List[str]=4 ,A : Optional[int]=4 ,A : str="gelu" ,A : Any=0.1 ,A : int=0.1 ,A : int=20 ,A : Tuple=2 ,A : Optional[int]=1 ,A : Union[str, Any]=0 ,A : str=16 ,A : Dict=16 ,):
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = eos_token_id
__A = pad_token_id
__A = bos_token_id
__A = embed_dim
__A = word_embed_proj_dim
__A = False
def UpperCamelCase_ ( self : List[str] ):
__A = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
__A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
__A = tf.concat([input_ids, eos_tensor] ,axis=1 )
__A = self.config_cls(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,embed_dim=self.embed_dim ,word_embed_proj_dim=self.word_embed_proj_dim ,is_encoder_decoder=A ,**self.config_updates ,)
__A = prepare_opt_inputs_dict(A ,A )
return config, inputs_dict
def UpperCamelCase_ ( self : Dict ,A : int ,A : Optional[int] ):
__A = TFOPTModel(config=A )
__A = inputs_dict["input_ids"]
__A = input_ids[:1, :]
__A = inputs_dict["attention_mask"][:1, :]
__A = 1
# first forward pass
__A = model(A ,attention_mask=A ,use_cache=A )
__A , __A = outputs.to_tuple()
        # create hypothetical next tokens and extend to next_input_ids
__A = ids_tensor((self.batch_size, 3) ,config.vocab_size )
__A = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
        # append the new tokens to input_ids and attention_mask
__A = tf.concat([input_ids, next_tokens] ,axis=-1 )
__A = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
__A = model(A ,attention_mask=A )[0]
__A = model(A ,attention_mask=A ,past_key_values=A )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
__A = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
__A = output_from_no_past[:, -3:, random_slice_idx]
__A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A ,A ,rtol=1E-3 )
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
snake_case_ = (TFOPTForCausalLM,) if is_tf_available() else ()
snake_case_ = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = 10
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = TFOPTModelTester(self )
__A = ConfigTester(self ,config_class=A )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
def UpperCamelCase_ ( self : Any ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(A : Optional[int] ,A : Optional[Any] ):
if hasattr(A ,"weight" ):
return embedding_layer.weight
else:
                    # Here we build the word embedding weights if they don't exist yet,
                    # and then retry fetching the attribute once built.
model.build()
if hasattr(A ,"weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__A = model_class(config=A )
__A = _get_word_embedding_weight(A ,model.get_input_embeddings() )
__A = _get_word_embedding_weight(A ,model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(A )
__A = _get_word_embedding_weight(A ,model.get_input_embeddings() )
__A = _get_word_embedding_weight(A ,model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__A = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] ,A )
# check that weights remain the same after resizing
__A = True
for pa, pa in zip(old_input_embeddings.value() ,new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__A = False
self.assertTrue(A )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] ,A )
__A = True
for pa, pa in zip(old_output_embeddings.value() ,new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__A = False
self.assertTrue(A )
def UpperCAmelCase ( a_ ) -> List[str]:
"""simple docstring"""
return tf.constant(a_ , dtype=tf.intaa )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
snake_case_ = 99
def UpperCamelCase_ ( self : Tuple ):
__A = tf.ones((4, 1) ,dtype=tf.intaa ) * 2
__A = tf.concat([ids_tensor((4, 6) ,self.vocab_size - 3 ) + 3, eos_column_vector] ,axis=1 )
__A = input_ids.shape[0]
__A = OPTConfig(
vocab_size=self.vocab_size ,hidden_size=24 ,num_hidden_layers=2 ,num_attention_heads=2 ,ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : List[str] ):
__A = TFOPTModel.from_pretrained("facebook/opt-350m" )
__A = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__A = tf.not_equal(A ,model.config.pad_token_id )
with tf.GradientTape():
__A = model(input_ids=A ,attention_mask=A ).last_hidden_state
__A = (1, 11, 5_12)
self.assertEqual(output.shape ,A )
__A = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] ,A ,atol=4E-3 ) )
__A = tf.function(A ,jit_compile=A )
__A = xla_generate(A ,A )[0]
self.assertTrue(np.allclose(output[:, :3, :3] ,A ,atol=4E-2 ) )
@require_tf
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
super().setUp()
__A = "facebook/opt-350m"
def UpperCamelCase_ ( self : Optional[int] ):
__A = TFOPTForCausalLM.from_pretrained(self.path_model )
__A = GPTaTokenizer.from_pretrained(self.path_model )
__A = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__A = tokenizer(A ,return_tensors="tf" ,padding=A ,add_special_tokens=A )
__A = tf.math.reduce_mean(model(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
__A = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(A ,A ,atol=1E-4 ) )
__A = tf.function(A ,jit_compile=A )
__A = tf.math.reduce_mean(xla_generate(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
self.assertTrue(np.allclose(A ,A ,atol=1E-4 ) )
@require_tf
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : int ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCamelCase_ ( self : Optional[int] ):
__A = "facebook/opt-125m"
__A = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
__A = []
__A = GPTaTokenizer.from_pretrained(A )
__A = TFOPTForCausalLM.from_pretrained(A )
for prompt in self.prompts:
__A = tokenizer(A ,return_tensors="tf" ).input_ids
__A = model.generate(A ,max_length=10 )
__A = tokenizer.batch_decode(A ,skip_special_tokens=A )
predicted_outputs += generated_string
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : int ):
__A = "facebook/opt-350m"
__A = GPTaTokenizer.from_pretrained(A )
__A = TFOPTForCausalLM.from_pretrained(A )
__A = "left"
# use different length sentences to test batching
__A = [
"Hello, my dog is a little",
"Today, I",
]
__A = tokenizer(A ,return_tensors="tf" ,padding=A )
__A = inputs["input_ids"]
__A = model.generate(input_ids=A ,attention_mask=inputs["attention_mask"] )
__A = tokenizer(sentences[0] ,return_tensors="tf" ).input_ids
__A = model.generate(input_ids=A )
__A = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] ,tf.intaa ) )
__A = tokenizer(sentences[1] ,return_tensors="tf" ).input_ids
__A = model.generate(input_ids=A ,max_length=model.config.max_length - num_paddings )
__A = tokenizer.batch_decode(A ,skip_special_tokens=A )
__A = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=A )
__A = tokenizer.decode(output_padded[0] ,skip_special_tokens=A )
__A = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(A ,A )
self.assertListEqual(A ,[non_padded_sentence, padded_sentence] )
def UpperCamelCase_ ( self : Dict ):
__A = "facebook/opt-350m"
__A = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
__A = []
__A = GPTaTokenizer.from_pretrained(A )
__A = TFOPTForCausalLM.from_pretrained(A )
for prompt in self.prompts:
__A = tokenizer(A ,return_tensors="tf" ).input_ids
__A = model.generate(A ,max_length=10 )
__A = tokenizer.batch_decode(A ,skip_special_tokens=A )
predicted_outputs += generated_string
self.assertListEqual(A ,A )
| 15 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
_lowerCamelCase : List[Any] = 6_3_7_8_1_3_7.0
_lowerCamelCase : List[Any] = 6_3_5_6_7_5_2.3_1_4_2_4_5
_lowerCamelCase : Optional[int] = 637_8137
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
A = (AXIS_A - AXIS_B) / AXIS_A
A = atan((1 - flattening) * tan(radians(UpperCAmelCase ) ) )
A = atan((1 - flattening) * tan(radians(UpperCAmelCase ) ) )
A = radians(UpperCAmelCase )
A = radians(UpperCAmelCase )
# Equation
A = sin((phi_a - phi_a) / 2 )
A = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
A = sqrt(sin_sq_phi + (cos(UpperCAmelCase ) * cos(UpperCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258 | 0 |
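The distance helper in this row's style context applies the haversine formula to reduced (flattening-corrected) latitudes on the WGS-84 ellipsoid. A de-obfuscated usage sketch; the San Francisco and New York coordinates are assumed values for illustration:

from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0       # semi-major axis, metres
AXIS_B = 6356752.314245  # semi-minor axis, metres
RADIUS = 6378137         # equatorial radius, metres

def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # convert geodetic latitudes to reduced latitudes, then apply haversine
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1, lambda_2 = radians(lon1), radians(lon2)
    sin_sq_phi = sin((phi_2 - phi_1) / 2) ** 2
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2) ** 2
    h = sqrt(sin_sq_phi + cos(phi_1) * cos(phi_2) * sin_sq_lambda)
    return 2 * RADIUS * asin(h)

# roughly 4.1e6 metres between San Francisco and New York (assumed coordinates)
print(haversine_distance(37.774856, -122.424227, 40.713019, -74.012647))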
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
'''simple docstring'''
import os
import sys
import unittest
_lowerCamelCase : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCamelCase : str = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
_lowerCamelCase : Tuple = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Any ):
A = get_test_to_tester_mapping(_lowerCAmelCase )
A = get_test_to_tester_mapping(_lowerCAmelCase )
A = {"""BertModelTest""": """BertModelTester"""}
A = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def A (self : str ):
A = get_model_to_test_mapping(_lowerCAmelCase )
A = get_model_to_test_mapping(_lowerCAmelCase )
A = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
A = get_model_to_tester_mapping(_lowerCAmelCase )
A = get_model_to_tester_mapping(_lowerCAmelCase )
A = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
| 258 | 0 |
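The MLuke row above shows transformers' optional-dependency guard: the import structure is declared up front, and the real import happens only if the dependency resolves. A stripped-down sketch of the same idea using only the standard library (the sentencepiece probe here is illustrative, not transformers' actual helper):

import importlib.util

def is_available(package: str) -> bool:
    # find_spec returns None when the package cannot be imported
    return importlib.util.find_spec(package) is not None

if is_available("sentencepiece"):
    import sentencepiece  # noqa: F401  # safe to expose features that need it
else:
    sentencepiece = None  # callers must check for None before use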
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Any, UpperCamelCase_ : Dict, UpperCamelCase_ : Optional[int], UpperCamelCase_ : List[Any]) -> str:
'''simple docstring'''
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCamelCase_, torch_dtype=torch.floataa)
# load LoRA weight from .safetensors
__lowercase = load_file(UpperCamelCase_)
__lowercase = []
# directly update weight in diffusers model
for key in state_dict:
        # it is suggested to print out the key; it will usually look something like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # the alpha values were handled beforehand, so just skip those keys here
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowercase = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
__lowercase = pipeline.text_encoder
else:
__lowercase = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
__lowercase = pipeline.unet
# find the target layer
__lowercase = layer_infos.pop(0)
while len(UpperCamelCase_) > -1:
try:
__lowercase = curr_layer.__getattr__(UpperCamelCase_)
if len(UpperCamelCase_) > 0:
__lowercase = layer_infos.pop(0)
elif len(UpperCamelCase_) == 0:
break
except Exception:
if len(UpperCamelCase_) > 0:
temp_name += "_" + layer_infos.pop(0)
else:
__lowercase = layer_infos.pop(0)
__lowercase = []
if "lora_down" in key:
pair_keys.append(key.replace("lora_down", "lora_up"))
pair_keys.append(UpperCamelCase_)
else:
pair_keys.append(UpperCamelCase_)
pair_keys.append(key.replace("lora_up", "lora_down"))
# update weight
if len(state_dict[pair_keys[0]].shape) == 4:
__lowercase = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.floataa)
__lowercase = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(UpperCamelCase_, UpperCamelCase_).unsqueeze(2).unsqueeze(3)
else:
__lowercase = state_dict[pair_keys[0]].to(torch.floataa)
__lowercase = state_dict[pair_keys[1]].to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(UpperCamelCase_, UpperCamelCase_)
# update visited list
for item in pair_keys:
visited.append(UpperCamelCase_)
return pipeline
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
_a = parser.parse_args()
_a = args.base_model_path
_a = args.checkpoint_path
_a = args.dump_path
_a = args.lora_prefix_unet
_a = args.lora_prefix_text_encoder
_a = args.alpha
_a = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 17 |
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if len(UpperCAmelCase ) <= 1 or n <= 1:
return
insert_next(UpperCAmelCase , n - 1 )
rec_insertion_sort(UpperCAmelCase , n - 1 )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if index >= len(UpperCAmelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
A , A = (
collection[index],
collection[index - 1],
)
insert_next(UpperCAmelCase , index + 1 )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = input('Enter integers separated by spaces: ')
_lowerCamelCase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 258 | 0 |
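The LoRA conversion row folds each low-rank pair into the base weight as W += alpha * (up @ down). A toy sketch of just that merge step on plain tensors (names are mine):

import torch

def merge_lora(weight: torch.Tensor, up: torch.Tensor, down: torch.Tensor, alpha: float) -> torch.Tensor:
    # W' = W + alpha * (B @ A): the low-rank update collapses into the base weight,
    # so inference afterwards needs no extra adapter layers
    return weight + alpha * torch.mm(up, down)

base = torch.zeros(4, 4)
up, down = torch.randn(4, 2), torch.randn(2, 4)  # rank-2 adapter pair
merged = merge_lora(base, up, down, alpha=0.75)
assert torch.allclose(merged, 0.75 * up @ down)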
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__lowerCamelCase : List[str] = HfApi()
__lowerCamelCase : Union[str, Any] = {}
# fmt: off
__lowerCamelCase : Any = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
__lowerCamelCase : Dict = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
__lowerCamelCase : int = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
__lowerCamelCase : Optional[int] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
__lowerCamelCase : List[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
__lowerCamelCase : Tuple = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
__lowerCamelCase : Any = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
__lowerCamelCase : Any = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
__lowerCamelCase : Optional[Any] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
__lowerCamelCase : Tuple = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
__lowerCamelCase : List[str] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
__lowerCamelCase : List[str] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
__lowerCamelCase : Any = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
__lowerCamelCase : Dict = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
__lowerCamelCase : List[str] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
__lowerCamelCase : str = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__lowerCamelCase : Optional[int] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
__lowerCamelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__lowerCamelCase : Any = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__lowerCamelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__lowerCamelCase : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__lowerCamelCase : List[Any] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'''{mod.modelId} has passed successfully!!!''')
| 18 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __a ( UpperCAmelCase ) ->Tuple: # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __a ( UpperCAmelCase ) ->int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def A (self : Tuple ):
A = {}
A = []
A = 1
A = [1, 2]
A = {"""a""": 1, """b""": 2}
A = {"""a""": [1, 2], """b""": [3, 4]}
A = {"""a""": {"""1""": 1}, """b""": 2}
A = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
A = {}
A = []
A = 2
A = [2, 3]
A = {"""a""": 2, """b""": 3}
A = {"""a""": [2, 3], """b""": [4, 5]}
A = {"""a""": {"""1""": 2}, """b""": 3}
A = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A = 2
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
A = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
A = {"""a""": 2, """b""": 0, """c""": 2}
A = {
"""a""": np.eye(2 ).astype(_lowerCAmelCase ),
"""b""": np.zeros(3 ).astype(_lowerCAmelCase ),
"""c""": np.ones(2 ).astype(_lowerCAmelCase ),
}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
map_nested(lambda _lowerCAmelCase : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )
def A (self : List[Any] ):
A = {"""a""": 1, """b""": 2}
A = {"""a""": 3, """b""": 4}
A = {"""a""": 5, """b""": 6}
A = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = '''bar'''
A = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(_lowerCAmelCase , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
A = {f"""{i}""": i for i in range(UpperCAmelCase )}
A = map_nested(lambda UpperCAmelCase : x + 10 , UpperCAmelCase , num_proc=UpperCAmelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
@require_tf
def A (self : Dict ):
import tensorflow as tf
from tensorflow.keras import layers
A = layers.Dense(2 )
def gen_random_output():
A = tf.random.uniform((1, 3) )
return model(_lowerCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def A (self : Tuple ):
import torch
def gen_random_output():
A = torch.nn.Linear(3 , 2 )
A = torch.rand(1 , 3 )
return model(_lowerCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def A (self : str ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A = gen_random_output()
with temp_seed(42 ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).flatten()
assert output == expected_output
def __a ( ) ->Optional[Any]:
"""simple docstring"""
A = A(x=1 , y="""foobar""" )
A = {"""x""": 1, """y""": """foobar"""}
assert asdict(UpperCAmelCase ) == expected_output
A = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
A = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(UpperCAmelCase ) == expected_output
with pytest.raises(UpperCAmelCase ):
asdict([1, A(x=10 , y="""foo""" )] )
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return text.split()
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __a ( ) ->Optional[int]:
"""simple docstring"""
with Pool(2 ) as pool:
A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(UpperCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(UpperCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A = []
for yield_time, content in iflatmap_unordered(
UpperCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(UpperCAmelCase )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(UpperCAmelCase ) == 4
| 258 | 0 |
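The diffusers script in this row guards against regressions by comparing a fixed 30-value slice of each model's output against stored references with a tight tolerance. A generic sketch of that check (the helper name and tolerance are mine):

import torch

def check_regression(output: torch.Tensor, expected_slice: torch.Tensor, atol: float = 1e-3) -> None:
    # compare a fixed slice of the output against a recorded reference;
    # a tight atol catches silent numerical drift between releases
    actual = output[0, 0, 0, : expected_slice.numel()].flatten()
    if not torch.allclose(actual, expected_slice, atol=atol):
        raise AssertionError(f"max abs diff {(actual - expected_slice).abs().max():.2e} exceeds {atol}")

check_regression(torch.zeros(1, 3, 32, 32), torch.zeros(30))  # trivially passes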
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
return int((input_a, input_a).count(0 ) == 0 )
def lowerCamelCase_ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 19 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def __a ( ) ->None:
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 258 | 0 |
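This row pairs AND and OR gates built by counting over a tuple; note the AND snippet's definitions were renamed (lowerCamelCase_) while its call sites kept the original and_gate/test_and_gate names, so it does not run as printed. A runnable sketch extending the same tuple-counting trick to NOT and XOR:

def not_gate(input_1: int) -> int:
    return int(input_1 == 0)

def xor_gate(input_1: int, input_2: int) -> int:
    # XOR = (a OR b) AND NOT (a AND b), using the same 0/1 convention
    a_or_b = int((input_1, input_2).count(1) != 0)
    a_and_b = int((input_1, input_2).count(0) == 0)
    return int((a_or_b, not_gate(a_and_b)).count(0) == 0)

assert [xor_gate(a, b) for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]] == [0, 1, 1, 0]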
# NOTE: This file is deprecated and will be removed in a future version.
# It exists only so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 20 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : str , _lowerCAmelCase : PriorTransformer , _lowerCAmelCase : CLIPVisionModel , _lowerCAmelCase : CLIPImageProcessor , _lowerCAmelCase : HeunDiscreteScheduler , _lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def A (self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
if latents is None:
A = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
A = latents.to(_lowerCAmelCase )
A = latents * scheduler.init_noise_sigma
return latents
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A = torch.device(F"""cuda:{gpu_id}""" )
A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def A (self : Optional[Any] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def A (self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
A = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
A = self.image_processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
A = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
A = self.image_encoder(_lowerCAmelCase )["""last_hidden_state"""]
A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
A = torch.zeros_like(_lowerCAmelCase )
            # For classifier-free guidance we need both an unconditional and a conditional
            # prediction. Here we concatenate the unconditional and image embeddings into a
            # single batch so that both are computed in one forward pass instead of two.
A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__(self : List[Any] , _lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 25 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
A = 1
elif isinstance(_lowerCAmelCase , torch.Tensor ):
A = image.shape[0]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A = len(_lowerCAmelCase )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCAmelCase )}""" )
A = self._execution_device
A = batch_size * num_images_per_prompt
A = guidance_scale > 1.0
A = self._encode_image(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# prior
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
A = self.scheduler.timesteps
A = self.prior.config.num_embeddings
A = self.prior.config.embedding_dim
A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A = latents.reshape(latents.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
A = self.prior(
_lowerCAmelCase , timestep=_lowerCAmelCase , proj_embedding=_lowerCAmelCase , ).predicted_image_embedding
# remove the variance
A , A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
A , A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A = self.scheduler.step(
_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCAmelCase )
A = []
for i, latent in enumerate(_lowerCAmelCase ):
print()
A = self.renderer.decode(
latent[None, :] , _lowerCAmelCase , size=_lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCAmelCase )
A = torch.stack(_lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
A = images.cpu().numpy()
if output_type == "pil":
A = [self.numpy_to_pil(_lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCAmelCase )
| 258 | 0 |
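The Shap-E pipeline in this row applies classifier-free guidance inside the denoising loop: the batch carries stacked [unconditional, conditional] predictions, and the final noise estimate extrapolates from the unconditional toward the conditional direction. The combination step in isolation, on toy tensors:

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # the batch holds [unconditional, conditional] predictions stacked along dim 0
    noise_uncond, noise_cond = noise_pred.chunk(2)
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)

pred = torch.cat([torch.zeros(1, 8), torch.ones(1, 8)])  # toy [uncond, cond] pair
assert torch.allclose(apply_cfg(pred, 4.0), torch.full((1, 8), 4.0))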
def UpperCamelCase_( lowerCamelCase_ = 100_0000 ) -> int:
_lowercase : Optional[int] = limit + 1
_lowercase : str = [0] * limit
for first_term in range(1 , lowerCamelCase_ ):
for n in range(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : int = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                frequency[n] += 1  # so z > 0 and a > d, also 4d < a
_lowercase : str = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"{solution() = }")
| 21 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Union[str, Any] ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
A = model
A = kwargs.get("""model_save_dir""" , _lowerCAmelCase )
A = kwargs.get("""latest_model_name""" , _lowerCAmelCase )
def __call__(self : Tuple , **_lowerCAmelCase : Optional[Any] ):
A = {k: np.array(_lowerCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_lowerCAmelCase , _lowerCAmelCase )
@staticmethod
def A (_lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
A = """CPUExecutionProvider"""
return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : List[str] ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A = self.model_save_dir.joinpath(self.latest_model_name )
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A = self.model_save_dir.joinpath(_lowerCAmelCase )
if src_path.exists():
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
def A (self : Tuple , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : str , ):
if os.path.isfile(_lowerCAmelCase ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# saving model weights/files
self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Union[bool, str, None]] = None , _lowerCAmelCase : Optional[Union[str, None]] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional["ort.SessionOptions"] = None , **_lowerCAmelCase : Optional[int] , ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowerCAmelCase ):
A = OnnxRuntimeModel.load_model(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
A = Path(_lowerCAmelCase )
# load model from hub
else:
# download model
A = hf_hub_download(
repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , )
A = Path(_lowerCAmelCase ).parent
A = Path(_lowerCAmelCase ).name
A = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
return cls(model=_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : str , ):
A = None
if len(str(_lowerCAmelCase ).split("""@""" ) ) == 2:
A , A = model_id.split("""@""" )
return cls._from_pretrained(
model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
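# A minimal usage sketch, assuming the class keeps its original diffusers name
# OnnxRuntimeModel (its own log message and the internal load_model calls use
# that name); the export directory, file name, and input shape below are
# illustrative placeholders, and `np` is already in scope from the dtype map above.
onnx_model = OnnxRuntimeModel.from_pretrained("./exported_onnx_dir", file_name="model.onnx")
# __call__ casts every keyword argument to a NumPy array, then runs the session
# with output_names=None so all outputs are returned
outputs = onnx_model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))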
| 258 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__SCREAMING_SNAKE_CASE :int = 2
class A_ :
def __init__( self : List[str] , *, # begin keyword-only arguments
snake_case_ : List[str]="<s>" , snake_case_ : Optional[int]="<pad>" , snake_case_ : Dict="</s>" , snake_case_ : Any="<unk>" , snake_case_ : Optional[Any]=None , ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = bos, unk, pad, eos
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = self.add_symbol(snake_case_ )
_UpperCAmelCase = self.add_symbol(snake_case_ )
_UpperCAmelCase = self.add_symbol(snake_case_ )
_UpperCAmelCase = self.add_symbol(snake_case_ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(snake_case_ )
_UpperCAmelCase = len(self.symbols )
def __eq__( self : Optional[Any] , snake_case_ : str ):
return self.indices == other.indices
def __getitem__( self : str , snake_case_ : List[str] ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Any ):
return len(self.symbols )
def __contains__( self : Union[str, Any] , snake_case_ : Tuple ):
return sym in self.indices
@classmethod
def lowercase ( cls : Tuple , snake_case_ : Tuple ):
_UpperCAmelCase = cls()
d.add_from_file(snake_case_ )
return d
def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Dict=1 , snake_case_ : List[str]=False ):
if word in self.indices and not overwrite:
_UpperCAmelCase = self.indices[word]
_UpperCAmelCase = self.count[idx] + n
return idx
else:
_UpperCAmelCase = len(self.symbols )
_UpperCAmelCase = idx
self.symbols.append(snake_case_ )
self.count.append(snake_case_ )
return idx
def lowercase ( self : Tuple , snake_case_ : Any ):
return 0
def lowercase ( self : List[str] , snake_case_ : str ):
if isinstance(snake_case_ , snake_case_ ):
try:
with open(snake_case_ , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(snake_case_ ) )
return
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = self._load_meta(snake_case_ )
for line in lines[indices_start_line:]:
try:
_UpperCAmelCase , _UpperCAmelCase = line.rstrip().rsplit(" " , 1 )
if field == "#fairseq:overwrite":
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = line.rsplit(" " , 1 )
else:
_UpperCAmelCase = False
_UpperCAmelCase = int(snake_case_ )
_UpperCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(snake_case_ ) )
self.add_symbol(snake_case_ , n=snake_case_ , overwrite=snake_case_ )
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def UpperCAmelCase_ ( __lowercase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = dict((re.sub(r"@@$" , "" , __lowercase ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , __lowercase ), v) for k, v in d.items() )
_UpperCAmelCase = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
_UpperCAmelCase = d[k] # restore
return da
def UpperCAmelCase_ ( __lowercase : List[str] , __lowercase : List[str] ) -> Optional[int]:
'''simple docstring'''
if not os.path.exists(__lowercase ):
raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(__lowercase , exist_ok=__lowercase )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
_UpperCAmelCase = os.path.join(__lowercase , "checkpoint.pt" )
if not os.path.isfile(__lowercase ):
raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
_UpperCAmelCase = torch.load(__lowercase , map_location="cpu" )
_UpperCAmelCase = chkpt["cfg"]["model"]
# dicts
_UpperCAmelCase = os.path.join(__lowercase , "dict.txt" )
if not os.path.isfile(__lowercase ):
raise ValueError(f'path to the file {dict_file} does not exist!' )
_UpperCAmelCase = Dictionary.load(__lowercase )
_UpperCAmelCase = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase = len(__lowercase )
_UpperCAmelCase = os.path.join(__lowercase , VOCAB_FILES_NAMES["vocab_file"] )
print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(__lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__lowercase , ensure_ascii=__lowercase , indent=__lowercase ) )
# merges_file (bpecodes)
_UpperCAmelCase = os.path.join(__lowercase , "bpecodes" )
if not os.path.isfile(__lowercase ):
raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
_UpperCAmelCase = os.path.join(__lowercase , VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(__lowercase , __lowercase )
# model config
_UpperCAmelCase = os.path.join(__lowercase , "config.json" )
_UpperCAmelCase = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(f'Generating {biogpt_model_config_file}' )
with open(__lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__lowercase , ensure_ascii=__lowercase , indent=__lowercase ) )
# tokenizer config
_UpperCAmelCase = os.path.join(__lowercase , __lowercase )
_UpperCAmelCase = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(f'Generating {biogpt_tokenizer_config_file}' )
with open(__lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(__lowercase , ensure_ascii=__lowercase , indent=__lowercase ) )
# model
_UpperCAmelCase = chkpt["model"]
# remove unneeded keys
_UpperCAmelCase = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(__lowercase , __lowercase )
_UpperCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight" ):
            model_state_dict[layer_name.replace("decoder." , "" )] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace("decoder" , "biogpt" )] = model_state_dict.pop(layer_name )
_UpperCAmelCase = BioGptConfig.from_pretrained(__lowercase )
_UpperCAmelCase = BioGptForCausalLM(__lowercase )
# check that it loads ok
model_new.load_state_dict(__lowercase )
# save
_UpperCAmelCase = os.path.join(__lowercase , __lowercase )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(__lowercase , __lowercase )
print("Conversion is done!" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 22 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A (self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
A = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
A = VideoClassificationPipeline(model=_lowerCAmelCase , image_processor=_lowerCAmelCase , top_k=2 )
A = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def A (self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
for example in examples:
A = video_classifier(_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
] , )
@require_torch
def A (self : Optional[Any] ):
A = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
A = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
A = pipeline(
"""video-classification""" , model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , frame_sampling_rate=4 )
A = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
A = video_classifier(_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
A = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def A (self : List[Any] ):
pass
| 258 | 0 |
'''simple docstring'''
def snake_case_ ( _lowerCAmelCase : int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
        raise ValueError('''The given input must be non-negative''' )
# get the generated string sequence
UpperCAmelCase : List[str] = gray_code_sequence_string(_lowerCAmelCase )
    # convert them to integers
for i in range(len(_lowerCAmelCase ) ):
UpperCAmelCase : Optional[int] = int(sequence[i] , 2 )
return sequence
def snake_case_ ( _lowerCAmelCase : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
UpperCAmelCase : Optional[int] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
UpperCAmelCase : Optional[Any] = gray_code_sequence_string(bit_count - 1 )
UpperCAmelCase : Tuple = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
UpperCAmelCase : Tuple = '''0''' + smaller_sequence[i]
sequence.append(_lowerCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
UpperCAmelCase : Optional[Any] = '''1''' + smaller_sequence[i]
sequence.append(_lowerCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
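    # Hedged sanity checks: the two defs above share a mangled name, so these
    # assume the originals (gray_code_sequence_string, which the bodies still
    # call, and gray_code_sequence for the integer wrapper).
    assert gray_code_sequence_string(3) == ["000", "001", "011", "010", "110", "111", "101", "100"]
    assert gray_code_sequence(2) == [0, 1, 3, 2]  # 00, 01, 11, 10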
| 23 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_lowerCamelCase : int = 10
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
for i in range(UpperCAmelCase , UpperCAmelCase ):
if array[i] == target:
return i
return -1
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = 0
A = len(UpperCAmelCase )
while left <= right:
if right - left < precision:
return lin_search(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A = (left + right) // 3 + 1
A = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
A = one_third - 1
elif array[two_third] < target:
A = two_third + 1
else:
A = one_third + 1
A = two_third - 1
else:
return -1
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A = (left + right) // 3 + 1
A = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(UpperCAmelCase , one_third - 1 , UpperCAmelCase , UpperCAmelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , UpperCAmelCase , UpperCAmelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : str = input('Enter numbers separated by comma:\n').strip()
_lowerCamelCase : str = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_lowerCamelCase : Optional[int] = int(input('Enter the number to be found in the list:\n').strip())
_lowerCamelCase : Union[str, Any] = ite_ternary_search(collection, target)
_lowerCamelCase : Union[str, Any] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"Iterative search: {target} found at positions: {resulta}")
print(f"Recursive search: {target} found at positions: {resulta}")
else:
print('Not found')
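# A non-interactive check, assuming the three mangled defs keep the names the
# driver above uses (lin_search, ite_ternary_search, rec_ternary_search); a
# 20-element list exercises both the ternary split and the linear fallback.
sample = list(range(0, 40, 2))
assert ite_ternary_search(sample, 18) == 9
assert rec_ternary_search(0, len(sample) - 1, sample, 18) == 9
assert ite_ternary_search(sample, 19) == -1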
| 258 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Any , *a__ : Tuple , **a__ : List[Any] ):
"""simple docstring"""
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , a__ , )
super().__init__(*a__ , **a__ )
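# The deprecation warning above names the replacement class; a hedged migration
# sketch using the public transformers import path:
from transformers import LayoutLMv2ImageProcessor
image_processor = LayoutLMv2ImageProcessor()  # drop-in for the deprecated feature extractor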
| 24 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = CycleDiffusionPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A (self : int ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A = CLIPTextModel(_lowerCAmelCase )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A (self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=0 ):
A = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
A = image / 2 + 0.5
if str(_lowerCAmelCase ).startswith("""mps""" ):
A = torch.manual_seed(_lowerCAmelCase )
else:
A = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
A = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def A (self : Any ):
A = """cpu""" # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A (self : str ):
A = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowerCAmelCase , """half""" ):
A = module.half()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A (self : Optional[int] ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def A (self : Optional[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def A (self : Dict ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A (self : Optional[Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A (self : Optional[int] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
assert np.abs(image - expected_image ).max() < 2e-2
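# A condensed, hedged version of the slow test above, outside unittest: the
# checkpoint, scheduler, image URL, and call signature are taken directly from
# that test (generator omitted; safety checker disabled as the test does).
def run_cycle_diffusion_example():
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/cycle-diffusion/black_colored_car.png"
    ).resize((512, 512))
    scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
    pipe = CycleDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
    )
    output = pipe(
        prompt="A blue colored car",          # target description
        source_prompt="A black colored car",  # what the input image shows
        image=init_image,
        num_inference_steps=100,
        eta=0.1,
        strength=0.85,
        guidance_scale=3,
        source_guidance_scale=1,
        output_type="np",
    )
    return output.images[0]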
| 258 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : List[str] = '''mgp-str'''
def __init__(self , SCREAMING_SNAKE_CASE__=[32, 1_28] , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=27 , SCREAMING_SNAKE_CASE__=38 , SCREAMING_SNAKE_CASE__=5_02_57 , SCREAMING_SNAKE_CASE__=3_05_22 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , **SCREAMING_SNAKE_CASE__ , ) -> List[str]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = image_size
SCREAMING_SNAKE_CASE__ : int = patch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = max_token_length
SCREAMING_SNAKE_CASE__ : Any = num_character_labels
SCREAMING_SNAKE_CASE__ : Tuple = num_bpe_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_wordpiece_labels
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = mlp_ratio
SCREAMING_SNAKE_CASE__ : Tuple = distilled
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = drop_rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = qkv_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attn_drop_rate
SCREAMING_SNAKE_CASE__ : Dict = drop_path_rate
SCREAMING_SNAKE_CASE__ : Any = output_aa_attentions
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
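# A hedged instantiation sketch: the checkpoint map and model_type above
# identify this as transformers' MgpstrConfig when used from outside the
# library; the keyword values are just the signature's own defaults spelled out.
from transformers import MgpstrConfig
config = MgpstrConfig(image_size=[32, 128], patch_size=4, max_token_length=27)
assert config.hidden_size == 768  # default from the signature above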
| 25 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Tuple , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str] ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A (self : Any , _lowerCAmelCase : str=None ):
A = {}
if top_k is not None:
A = top_k
return {}, {}, postprocess_params
def __call__(self : str , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : int ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : List[Any] ):
A = load_image(_lowerCAmelCase )
A = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A (self : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
A = self.model(**_lowerCAmelCase )
return model_outputs
def A (self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=5 ):
if top_k > self.model.config.num_labels:
A = self.model.config.num_labels
if self.framework == "pt":
A = model_outputs.logits.softmax(-1 )[0]
A , A = probs.topk(_lowerCAmelCase )
elif self.framework == "tf":
A = stable_softmax(model_outputs.logits , axis=-1 )[0]
A = tf.math.top_k(_lowerCAmelCase , k=_lowerCAmelCase )
A , A = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
A = scores.tolist()
A = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_lowerCAmelCase , _lowerCAmelCase )]
| 258 | 0 |
def lowerCAmelCase_ ( snake_case_ ):
if not isinstance(snake_case_,snake_case_ ):
_A : Tuple = f'''Input value of [number={number}] must be an integer'''
raise TypeError(snake_case_ )
if number < 0:
return False
_A : Union[str, Any] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
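    # The predicate above tests whether number**2 ends in number's own digits,
    # i.e. whether the input is an automorphic number; the mangled def name is
    # still directly callable:
    assert lowerCAmelCase_(5)  # 5**2 = 25 ends in 5
    assert lowerCAmelCase_(76)  # 76**2 = 5776 ends in 76
    assert not lowerCAmelCase_(7)  # 7**2 = 49 does not end in 7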
| 26 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
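    # Worked example of the Newton-Laplace relation c = sqrt(K / rho) computed
    # above; the water figures are approximate textbook values, not from this
    # file: sqrt(2.15e9 Pa / 998 kg/m^3) is roughly 1468 m/s, close to the
    # measured ~1480 m/s for water.
    print(__a(998, 2.15e9))  # ~1467.8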
| 258 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
__lowercase : str = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple=None ):
# Initialise PyTorch model
__a : Union[str, Any] = XLNetConfig.from_json_file(_SCREAMING_SNAKE_CASE )
__a : str = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
__a : Tuple = finetuning_task
__a : Tuple = GLUE_TASKS_NUM_LABELS[finetuning_task]
__a : Union[str, Any] = XLNetForSequenceClassification(_SCREAMING_SNAKE_CASE )
elif "squad" in finetuning_task:
__a : List[str] = finetuning_task
__a : Tuple = XLNetForQuestionAnswering(_SCREAMING_SNAKE_CASE )
else:
__a : str = XLNetLMHeadModel(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
__a : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Any = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F"""Save PyTorch model to {os.path.abspath(_SCREAMING_SNAKE_CASE )}""" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
print(F"""Save configuration file to {os.path.abspath(_SCREAMING_SNAKE_CASE )}""" )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
__lowercase : List[str] = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
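    # A hedged direct-call sketch with placeholder paths, in the same positional
    # order as the call just above; "sts-b" routes to the sequence-classification
    # head with a single regression label per GLUE_TASKS_NUM_LABELS.
    if False:
        convert_xlnet_checkpoint_to_pytorch(
            "/path/to/xlnet_model.ckpt",   # tf_checkpoint_path
            "/path/to/xlnet_config.json",  # xlnet_config_file
            "/path/to/hf_output",          # pytorch_dump_folder_path
            "sts-b",                       # finetuning_task
        )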
| 27 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BioGptTokenizer
__lowerCAmelCase = False
def A (self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def A (self : Tuple , _lowerCAmelCase : List[str] ):
A = """lower newer"""
A = """lower newer"""
return input_text, output_text
def A (self : List[Any] ):
A = BioGptTokenizer(self.vocab_file , self.merges_file )
A = """lower"""
A = ["""low""", """er</w>"""]
A = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
A = tokens + ["""<unk>"""]
A = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@slow
def A (self : Union[str, Any] ):
A = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 258 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
    UpperCamelCase = HfArgumentParser(TensorFlowBenchmarkArguments )
UpperCamelCase = parser.parse_args_into_dataclasses()[0]
UpperCamelCase = TensorFlowBenchmark(args=A__ )
try:
UpperCamelCase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCamelCase = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
UpperCamelCase = ' '.join(str(A__ ).split(' ' )[:-1] )
UpperCamelCase = ''
UpperCamelCase = eval(str(A__ ).split(' ' )[-1] )
UpperCamelCase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(A__ )
if len(A__ ) > 0:
UpperCamelCase = full_error_msg + begin_error_msg + str(A__ )
raise ValueError(A__ )
benchmark.run()
if __name__ == "__main__":
main()
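# A hedged programmatic equivalent of the CLI entry point above; models,
# batch_sizes, and sequence_lengths are documented TensorFlowBenchmarkArguments
# fields, and the checkpoint name is illustrative.
def run_example_benchmark():
    benchmark_args = TensorFlowBenchmarkArguments(
        models=['bert-base-uncased'], batch_sizes=[8], sequence_lengths=[128]
    )
    TensorFlowBenchmark(args=benchmark_args).run()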
| 28 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
return np.maximum(0 , UpperCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 258 | 0 |
def lowercase__ ( __snake_case : int = 4_000_000 ):
'''simple docstring'''
UpperCAmelCase_ : int = [0, 1]
UpperCAmelCase_ : List[str] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
UpperCAmelCase_ : str = 0
for j in range(len(__snake_case ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
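# A hedged alternative (not from this file): every third Fibonacci number is
# even, and the even terms satisfy E(k) = 4*E(k-1) + E(k-2) starting 2, 8, 34,
# ...; for the default 4,000,000 limit both routes give 4613732 (Project Euler 2).
def solution_even_terms_only(n: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total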
if __name__ == "__main__":
print(F'{solution() = }')
| 29 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=13 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : str=[1, 2, 1] , _lowerCAmelCase : List[Any]=[2, 2, 4] , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Any=2.0 , _lowerCAmelCase : Any=True , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=10 , _lowerCAmelCase : int=8 , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = num_heads
A = window_size
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = use_absolute_embeddings
A = patch_norm
A = layer_norm_eps
A = initializer_range
A = is_training
A = scope
A = use_labels
A = type_sequence_label_size
A = encoder_stride
def A (self : Dict ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def A (self : Optional[Any] ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A (self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] ):
A = SwinvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase )
A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A (self : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
A = SwinvaForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A = 1
A = SwinvaForMaskedImageModeling(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A (self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ):
A = self.type_sequence_label_size
A = SwinvaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A (self : Union[str, Any] ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__lowerCAmelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Any ):
A = SwinvaModelTester(self )
A = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )
def A (self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A (self : int ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A (self : Dict ):
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A (self : Optional[int] ):
pass
def A (self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def A (self : Optional[int] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def A (self : int ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
A = True
A = False
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
A = len(self.model_tester.depths )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = config.window_size**2
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
A = len(_lowerCAmelCase )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
A = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
A = 2
self.assertEqual(out_len + added_hidden_states , len(_lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def A (self : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.hidden_states
A = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swinv2 has a different seq_length
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
A = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
A , A , A , A = reshaped_hidden_states[0].shape
A = (
reshaped_hidden_states[0].view(_lowerCAmelCase , _lowerCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A (self : Tuple ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A (self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
def A (self : Optional[int] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def A (self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def A (self : Optional[Any] ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = SwinvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def A (self : Optional[Any] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
A = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A (self : List[str] ):
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A (self : List[str] ):
A = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
_lowerCAmelCase )
A = self.default_image_processor
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
A = model(**_lowerCAmelCase )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
A = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 258 | 0 |
def a ( snake_case__: list ):
'''simple docstring'''
for i in range(len(snake_case__ ) - 1 , 0 , -1 ):
lowercase_ = False
for j in range(snake_case__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ = unsorted[j - 1], unsorted[j]
lowercase_ = True
for j in range(snake_case__ ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ = unsorted[j + 1], unsorted[j]
lowercase_ = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = input('Enter numbers separated by a comma:\n').strip()
__a = [int(item) for item in user_input.split(',')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 30 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
def get_matched_characters(UpperCAmelCase , UpperCAmelCase ) -> str:
A = []
A = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
A = int(max(0 , i - limit ) )
A = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(UpperCAmelCase )
A = f"""{_stra[0:_stra.index(UpperCAmelCase )]} {_stra[_stra.index(UpperCAmelCase ) + 1:]}"""
return "".join(UpperCAmelCase )
# matching characters
A = get_matched_characters(UpperCAmelCase , UpperCAmelCase )
A = get_matched_characters(UpperCAmelCase , UpperCAmelCase )
A = len(UpperCAmelCase )
# transposition
A = (
len([(ca, ca) for ca, ca in zip(UpperCAmelCase , UpperCAmelCase ) if ca != ca] ) // 2
)
if not match_count:
A = 0.0
else:
A = (
1
/ 3
* (
match_count / len(UpperCAmelCase )
+ match_count / len(UpperCAmelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
A = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
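    # Classic worked pair, computed by hand from the definition above (not taken
    # from this file): "martha" vs "marhta" has 6 matches, 1 transposition
    # (Jaro = 17/18), and a 3-character common prefix, so the Winkler boost
    # lifts it to about 0.961.
    print(round(jaro_winkler('martha', 'marhta'), 3))  # ~0.961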
| 258 | 0 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__SCREAMING_SNAKE_CASE : Any = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
__SCREAMING_SNAKE_CASE : Tuple = cvtColor(img, COLOR_BGR2GRAY)
def UpperCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = cn.convert_to_negative(_UpperCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCAmelCase , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def UpperCamelCase_ ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCamelCase_ ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : str = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_UpperCAmelCase : str = canny.canny(_UpperCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def UpperCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
assert gg.gaussian_filter(_UpperCAmelCase , 5 , sigma=0.9 ).all()
def UpperCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
_UpperCAmelCase : List[str] = conv.img_convolve(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase )
assert res.any()
def UpperCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
assert med.median_filter(_UpperCAmelCase , 3 ).any()
def UpperCamelCase_ ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Any = sob.sobel_filter(_UpperCAmelCase )
assert grad.any() and theta.any()
def UpperCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = sp.make_sepia(_UpperCAmelCase , 20 )
assert sepia.all()
def UpperCamelCase_ ( _UpperCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Tuple = bs.Burkes(imread(_UpperCAmelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def UpperCamelCase_ ( _UpperCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Tuple = rs.NearestNeighbour(imread(_UpperCAmelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def UpperCamelCase_ ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : str = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
_UpperCAmelCase : Tuple = imread(_UpperCAmelCase , 0 )
# Test for get_neighbors_pixel function() return not None
_UpperCAmelCase : Any = 0
_UpperCAmelCase : str = 0
_UpperCAmelCase : Tuple = image[x_coordinate][y_coordinate]
_UpperCAmelCase : Dict = lbp.get_neighbors_pixel(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_UpperCAmelCase : Dict = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_UpperCAmelCase : List[str] = lbp.local_binary_value(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
assert lbp_image.any()
| 31 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_lowerCamelCase : List[str] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_lowerCamelCase : List[Any] = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_lowerCamelCase : Dict = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self , predictions , references ):
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 258 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( __A : torch.nn.Module , __A : BnbQuantizationConfig , __A : Union[str, os.PathLike] = None , __A : Optional[Dict[str, Union[int, str, torch.device]]] = None , __A : Optional[List[str]] = None , __A : Optional[Dict[Union[int, str], Union[int, str]]] = None , __A : Optional[Union[str, os.PathLike]] = None , __A : bool = False , ) -> Dict:
"""simple docstring"""
a_ : Optional[int] = bnb_quantization_config.load_in_abit
a_ : Any = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
a_ : List[Any] = []
# custom device map
if isinstance(__A , __A ) and len(device_map.keys() ) > 1:
a_ : Optional[int] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a_ : List[str] = get_keys_to_not_convert(__A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__A )
a_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a_ : List[Any] = []
a_ : str = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__A )
# compatibility with peft
a_ : Any = load_in_abit
a_ : List[Any] = load_in_abit
a_ : List[Any] = get_parameter_device(__A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
a_ : Any = replace_with_bnb_layers(__A , __A , modules_to_not_convert=__A )
# convert param to the right dtype
a_ : Union[str, Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
a_ : int = name.replace('.weight' , '' ).replace('.bias' , '' )
a_ : int = getattr(__A , __A , __A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__A ):
param.to(__A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            F"""The model device type is {model_device.type}. However, cuda is needed for quantization. """
            'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
a_ : List[Any] = replace_with_bnb_layers(
__A , __A , modules_to_not_convert=__A )
a_ : Any = get_quantized_model_device_map(
__A , __A , __A , max_memory=__A , no_split_module_classes=__A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a_ : Dict = True
a_ : Any = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
__A , __A , __A , dtype=bnb_quantization_config.torch_dtype , offload_folder=__A , offload_state_dict=__A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__A , device_map=__A , offload_dir=__A )
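

# A minimal usage sketch (assumption): upstream, the entry point above is
# accelerate's `load_and_quantize_model`; the model class and checkpoint path
# below are placeholders, not part of this module.
def _example_load_and_quantize():
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    with init_empty_weights():
        model = MyModel()  # placeholder architecture, defined elsewhere
    return load_and_quantize_model(
        model,
        bnb_quantization_config=bnb_config,
        weights_location="path/to/checkpoint",  # placeholder
        device_map="auto",
    )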
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Dict , __A : List[Any]=None , __A : Union[str, Any]=None , __A : Optional[Any]=None ) -> str:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
a_ : Dict = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(__A , __A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
a_ : Any = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
a_ : Optional[Any] = {}
a_ : Union[str, Any] = special_dtypes
a_ : Optional[int] = no_split_module_classes
a_ : str = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a_ : Optional[Any] = get_balanced_memory(
__A , low_zero=(device_map == 'balanced_low_0') , max_memory=__A , **__A , )
a_ : int = max_memory
a_ : Optional[int] = infer_auto_device_map(__A , **__A )
if isinstance(__A , __A ):
# check if don't have any quantized module on the cpu
a_ : str = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a_ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
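

# As the check above implies, a custom `device_map` is a plain dict from module
# names to devices; the module names below are hypothetical.
_EXAMPLE_DEVICE_MAP = {
    "transformer.word_embeddings": 0,
    "transformer.h": 0,          # quantized blocks have to stay on a GPU
    "transformer.ln_f": "cpu",   # non-quantized modules may be offloaded
    "lm_head": "disk",
}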
def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : List[str] , __A : List[Any]=None , __A : List[Any]=None ) -> Any:
"""simple docstring"""
if modules_to_not_convert is None:
a_ : Union[str, Any] = []
a_ , a_ : List[str] = _replace_with_bnb_layers(
__A , __A , __A , __A )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : Tuple=None , __A : Any=None , ) -> Any:
"""simple docstring"""
a_ : int = False
for name, module in model.named_children():
if current_key_name is None:
a_ : List[str] = []
current_key_name.append(__A )
if isinstance(__A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a_ : Union[str, Any] = '.'.join(__A )
a_ : Any = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a_ : int = False
break
if proceed:
                # Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
a_ : Union[str, Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
a_ : Optional[int] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
a_ : Union[str, Any] = module.weight.data
if module.bias is not None:
a_ : str = module.bias.data
bnb_module.requires_grad_(__A )
setattr(__A , __A , __A )
a_ : int = True
if len(list(module.children() ) ) > 0:
a_ , a_ : List[str] = _replace_with_bnb_layers(
__A , __A , __A , __A )
a_ : List[str] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Optional[int]:
"""simple docstring"""
with init_empty_weights():
        a_ : List[str] = deepcopy(__A )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
a_ : List[str] = find_tied_parameters(__A )
# For compatibility with Accelerate < 0.18
if isinstance(__A , __A ):
a_ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
a_ : Any = sum(__A , [] )
a_ : Optional[int] = len(__A ) > 0
# Check if it is a base model
a_ : Union[str, Any] = False
if hasattr(__A , 'base_model_prefix' ):
a_ : Union[str, Any] = not hasattr(__A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a_ : Tuple = list(model.named_children() )
a_ : str = [list_modules[-1][0]]
# add last module together with tied weights
a_ : List[str] = set(__A ) - set(__A )
a_ : Dict = list(set(__A ) ) + list(__A )
# remove ".weight" from the keys
a_ : List[str] = ['.weight', '.bias']
a_ : List[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a_ : Dict = name.replace(__A , '' )
filtered_module_names.append(__A )
return filtered_module_names
def SCREAMING_SNAKE_CASE_ ( __A : Any ) -> Any:
"""simple docstring"""
for m in model.modules():
if isinstance(__A , bnb.nn.Linearabit ):
return True
return False
def SCREAMING_SNAKE_CASE_ ( __A : nn.Module ) -> Union[str, Any]:
"""simple docstring"""
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : int , __A : Optional[Any] , __A : List[str] , __A : Dict , __A : Tuple , __A : Optional[Any] ) -> Dict:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__A , __A , 0 , dtype=__A , value=__A )
a_ : Optional[int] = param_name
a_ : List[str] = model
if "." in tensor_name:
a_ : int = tensor_name.split('.' )
for split in splits[:-1]:
a_ : int = getattr(__A , __A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
a_ : Optional[Any] = new_module
a_ : List[str] = splits[-1]
# offload weights
a_ : Union[str, Any] = False
offload_weight(module._parameters[tensor_name] , __A , __A , index=__A )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __A , index=__A , )
else:
offload_weight(__A , __A , __A , index=__A )
offload_weight(__A , param_name.replace('weight' , 'SCB' ) , __A , index=__A )
set_module_tensor_to_device(__A , __A , 'meta' , dtype=__A , value=torch.empty(*param.size() ) )
| 32 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig( datasets.BuilderConfig ):
    '''simple docstring'''
    features: Optional[datasets.Features] = None


class Pandas( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table(self , pa_table: pa.Table ):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables(self , files ):
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , """rb""" ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
                yield i, self._cast_table(pa_table )
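

if __name__ == "__main__":
    # Usage sketch: this packaged builder is normally reached through
    # `datasets.load_dataset("pandas", ...)`; the pickle file below is a
    # throwaway example.
    pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
    ds = datasets.load_dataset("pandas", data_files={"train": "train.pkl"})
    print(ds["train"][0])  # {'text': 'a', 'label': 0}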
| 258 | 0 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(check: list) -> bool:
    return len(set(check)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
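    # Worked check of the helpers above: 644 = 2^2 * 7 * 23 has three distinct
    # prime factors, and 644, 645, 646 is the first run of three consecutive
    # integers that each have exactly three.
    assert unique_prime_factors(644) == {2, 7, 23}
    assert run(3) == [644, 645, 646]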
| 33 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """simple docstring"""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """simple docstring"""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """simple docstring"""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()", "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()", "\tans =",
        can_string_be_rearranged_as_palindrome(input_str), "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        'Enter string to determine if it can be rearranged as a palindrome or not: '
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
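    # Spot checks: "Momo" lowercases to "momo" with counts {'m': 2, 'o': 2},
    # so it can be rearranged; "abcd" has four odd counts, so it cannot.
    assert can_string_be_rearranged_as_palindrome_counter("Momo")
    assert not can_string_be_rearranged_as_palindrome("abcd")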
| 258 | 0 |
'''simple docstring'''
def snake_case_ (input_num : int ):
    if not isinstance(input_num , int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
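    # Sanity checks: the proper divisors of 6 are 1, 2, 3 (sum 6, a perfect
    # number), and those of 10 are 1, 2, 5 (sum 8).
    assert snake_case_(6) == 6
    assert snake_case_(10) == 8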
| 34 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''encodec'''
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}""" )
        super().__init__(**kwargs )
    @property
    def chunk_length(self ):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride(self ):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate(self ):
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers(self ):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
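

if __name__ == "__main__":
    # Quick numeric check of the derived properties under the defaults above:
    # hop_length = prod([8, 5, 4, 2]) = 320 samples per frame, so
    # frame_rate = ceil(24000 / 320) = 75, and
    # num_quantizers = 1000 * 24.0 // (75 * 10) = 32.
    config = EncodecConfig()
    assert config.frame_rate == 75
    assert config.num_quantizers == 32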
| 258 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """simple docstring"""
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self ):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov(self ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords(self ):
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="""trunc""" ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays(self ):
        batch_size , *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays(self , coords: torch.Tensor ):
        batch_size , *shape , n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image(self , width: int , height: int ):
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , )


def create_pan_cameras( size: int ) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
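

if __name__ == "__main__":
    # Demo of the 20-view orbit rig built above; the size is arbitrary. With
    # shape=(1, 20) and a 64x64 view, camera_rays is (1, 20*64*64, 2, 3).
    cameras = create_pan_cameras(64)
    print(cameras.camera_rays.shape)  # torch.Size([1, 81920, 2, 3])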
| 35 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258 | 0 |
def matching_min_vertex_cover(graph: dict ) -> set:
    '''simple docstring'''
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph )
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices


def get_edges(graph: dict ) -> set:
    '''simple docstring'''
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
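    # Worked example on the graph above: edges are popped from a set, so the
    # exact cover found is order-dependent, but it always covers every edge.
    example_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = matching_min_vertex_cover(example_graph)
    assert all(u in cover or v in cover for u in example_graph for v in example_graph[u])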
| 36 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""
    def get_tokenizer(self , **kwargs ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
def A (self : Optional[Any] ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
A = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def A (self : int ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def A (self : Union[str, Any] ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
A = 35
A = 2
A = 8
A = {
"""semantic_prompt""": np.ones(_lowerCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
A = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_lowerCAmelCase , **_lowerCAmelCase )
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
A = processor(text=self.input_string , voice_preset=self.voice_preset )
def A (self : str ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
A = processor(text=self.input_string )
A = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 258 | 0 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> str:
if not conversation_id:
            lowerCAmelCase__ : List[str] = uuid.uuid4()
if past_user_inputs is None:
lowerCAmelCase__ : List[Any] = []
if generated_responses is None:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : uuid.UUID = conversation_id
lowerCAmelCase__ : List[str] = past_user_inputs
lowerCAmelCase__ : List[str] = generated_responses
lowerCAmelCase__ : Optional[str] = text
def __eq__( self ,__UpperCAmelCase ) -> Dict:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Optional[Any]:
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
lowerCAmelCase__ : Optional[int] = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
lowerCAmelCase__ : Optional[Any] = text
def UpperCAmelCase_ ( self ) -> List[Any]:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase__ : Union[str, Any] = None
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
self.generated_responses.append(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Tuple:
lowerCAmelCase__ : Tuple = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
lowerCAmelCase__ : Any = """user""" if is_user else """bot"""
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase_( Pipeline ):
'''simple docstring'''
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase__ : Tuple = self.tokenizer.eos_token
def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = {}
lowerCAmelCase__ : Optional[int] = {}
lowerCAmelCase__ : List[str] = {}
if min_length_for_response is not None:
lowerCAmelCase__ : Any = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase__ : Optional[int] = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase__ : Optional[Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase__ : int = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ,**__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase ,num_workers=__UpperCAmelCase ,**__UpperCAmelCase )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=32 ) -> Dict[str, Any]:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
lowerCAmelCase__ : str = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase__ : List[Any] = self._legacy_parse_and_tokenize(__UpperCAmelCase )
if self.framework == "pt":
lowerCAmelCase__ : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase__ : Dict = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=10 ,**__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Optional[Any] = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
lowerCAmelCase__ : Optional[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
lowerCAmelCase__ : str = max_length - minimum_tokens
lowerCAmelCase__ : Union[str, Any] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase__ : Tuple = model_inputs["""attention_mask"""][:, -trim:]
lowerCAmelCase__ : str = model_inputs.pop("""conversation""" )
lowerCAmelCase__ : Union[str, Any] = max_length
lowerCAmelCase__ : Any = self.model.generate(**__UpperCAmelCase ,**__UpperCAmelCase )
if self.model.config.is_encoder_decoder:
lowerCAmelCase__ : int = 1
else:
lowerCAmelCase__ : int = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=True ) -> List[str]:
lowerCAmelCase__ : Optional[int] = model_outputs["""output_ids"""]
lowerCAmelCase__ : Tuple = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,)
lowerCAmelCase__ : Union[str, Any] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__UpperCAmelCase )
return conversation
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Dict = self.tokenizer.eos_token_id
lowerCAmelCase__ : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
lowerCAmelCase__ : Optional[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
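

if __name__ == "__main__":
    # Usage sketch: any conversational checkpoint works with this pipeline; the
    # model name below is only an example.
    from transformers import Conversation, pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
    conversation = Conversation("What's the best way to learn Python?")
    conversation = chatbot(conversation)
    print(conversation.generated_responses[-1])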
| 37 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance( lat1 , lon1 , lat2 , lon2 ) ->float:
    """simple docstring"""
    # Equation parameters (WGS84 flattening)
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
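    # Example: identical points give exactly 0.0; San Francisco to Yosemite
    # Valley comes out to roughly 254 km.
    assert haversine_distance(37.774856, -122.424227, 37.774856, -122.424227) == 0.0
    print(f"{haversine_distance(37.774856, -122.424227, 37.864742, -119.537521) / 1000:.0f} km")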
| 258 | 0 |
def is_balanced( s : str ) -> bool:
    """simple docstring"""
    stack = []
    open_brackets = set({"""(""", """[""", """{"""} )
    closed_brackets = set({""")""", """]""", """}"""} )
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(s ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0


def main( ) -> None:
    """simple docstring"""
    s = input("""Enter sequence of brackets: """ )
    if is_balanced(s ):
        print(s , """is balanced""" )
    else:
        print(s , """is not balanced""" )
if __name__ == "__main__":
main()
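    # Spot checks:
    assert is_balanced("([]{})")    # properly nested
    assert not is_balanced("([)]")  # interleaved, not nested
    assert not is_balanced("(")     # unmatched opener leaves the stack non-empty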
| 38 |
'''simple docstring'''
import os
import sys
import unittest
_lowerCamelCase : Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCamelCase : str = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
_lowerCamelCase : Tuple = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Any ):
A = get_test_to_tester_mapping(_lowerCAmelCase )
A = get_test_to_tester_mapping(_lowerCAmelCase )
A = {"""BertModelTest""": """BertModelTester"""}
A = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def A (self : str ):
A = get_model_to_test_mapping(_lowerCAmelCase )
A = get_model_to_test_mapping(_lowerCAmelCase )
A = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
A = get_model_to_tester_mapping(_lowerCAmelCase )
A = get_model_to_tester_mapping(_lowerCAmelCase )
A = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(get_test_info.to_json(_lowerCAmelCase ) , _lowerCAmelCase )
| 258 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 39 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort( collection , n ) -> None:
    """simple docstring"""
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )


def insert_next( collection , index ) -> None:
    """simple docstring"""
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )


if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
print(number_list)
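    # Quick check of the recursion on a fixed list:
    sample = [5, 3, 1, 4, 2]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 2, 3, 4, 5]  # sorted in place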
| 258 | 0 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 40 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __a ( UpperCAmelCase ) ->Tuple: # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __a ( UpperCAmelCase ) ->int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class A:
    '''simple docstring'''
    x: int
    y: str
class __UpperCAmelCase ( TestCase ):
'''simple docstring'''
def A (self : Tuple ):
A = {}
A = []
A = 1
A = [1, 2]
A = {"""a""": 1, """b""": 2}
A = {"""a""": [1, 2], """b""": [3, 4]}
A = {"""a""": {"""1""": 1}, """b""": 2}
A = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
A = {}
A = []
A = 2
A = [2, 3]
A = {"""a""": 2, """b""": 3}
A = {"""a""": [2, 3], """b""": [4, 5]}
A = {"""a""": {"""1""": 2}, """b""": 3}
A = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A = 2
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
A = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
A = {"""a""": 2, """b""": 0, """c""": 2}
A = {
"""a""": np.eye(2 ).astype(_lowerCAmelCase ),
"""b""": np.zeros(3 ).astype(_lowerCAmelCase ),
"""c""": np.ones(2 ).astype(_lowerCAmelCase ),
}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
map_nested(lambda _lowerCAmelCase : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )
def A (self : List[Any] ):
A = {"""a""": 1, """b""": 2}
A = {"""a""": 3, """b""": 4}
A = {"""a""": 5, """b""": 6}
A = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
        class Foo:
            '''simple docstring'''
            my_attr = '''bar'''
        foo = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(_lowerCAmelCase , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
A = {f"""{i}""": i for i in range(UpperCAmelCase )}
A = map_nested(lambda UpperCAmelCase : x + 10 , UpperCAmelCase , num_proc=UpperCAmelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __UpperCAmelCase ( TestCase ):
'''simple docstring'''
@require_tf
def A (self : Dict ):
import tensorflow as tf
from tensorflow.keras import layers
A = layers.Dense(2 )
def gen_random_output():
A = tf.random.uniform((1, 3) )
return model(_lowerCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def A (self : Tuple ):
import torch
def gen_random_output():
A = torch.nn.Linear(3 , 2 )
A = torch.rand(1 , 3 )
return model(_lowerCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def A (self : str ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A = gen_random_output()
with temp_seed(42 ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).flatten()
assert output == expected_output
def __a ( ) ->Optional[Any]:
"""simple docstring"""
A = A(x=1 , y="""foobar""" )
A = {"""x""": 1, """y""": """foobar"""}
assert asdict(UpperCAmelCase ) == expected_output
A = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
A = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(UpperCAmelCase ) == expected_output
with pytest.raises(UpperCAmelCase ):
asdict([1, A(x=10 , y="""foo""" )] )
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return text.split()
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __a ( ) ->Optional[int]:
"""simple docstring"""
with Pool(2 ) as pool:
A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(UpperCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(UpperCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A = []
for yield_time, content in iflatmap_unordered(
UpperCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(UpperCAmelCase )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(UpperCAmelCase ) == 4
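

def test_map_nested_example():
    # Orientation example: map_nested applies the function to the leaves of an
    # arbitrarily nested structure and preserves its shape.
    assert map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"1": 3}}) == {"a": [2, 3], "b": {"1": 4}}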
| 258 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] , x: float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )


def horner(poly: Sequence[float] , x: float ) -> float:
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_A : Any =(0.0, 0.0, 5.0, 9.3, 7.0)
_A : Optional[Any] =10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
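    # Horner's rule rewrites a0 + a1*x + ... + an*x**n as
    # a0 + x*(a1 + x*(a2 + ...)), needing only n multiplications and n additions.
    # Quick agreement check (illustrative addition, not in the original script):
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9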
| 41 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """simple docstring"""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
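    # Exhaustive truth table (illustrative addition): OR is 0 only for (0, 0).
    from itertools import product

    for a, b in product((0, 1), repeat=2):
        print(f"or_gate({a}, {b}) = {or_gate(a, b)}")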
| 258 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 42 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : str , _lowerCAmelCase : PriorTransformer , _lowerCAmelCase : CLIPVisionModel , _lowerCAmelCase : CLIPImageProcessor , _lowerCAmelCase : HeunDiscreteScheduler , _lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def A (self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
if latents is None:
A = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
A = latents.to(_lowerCAmelCase )
A = latents * scheduler.init_noise_sigma
return latents
def A (self : Union[str, Any] , _lowerCAmelCase : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A = torch.device(F"""cuda:{gpu_id}""" )
A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def A (self : Optional[Any] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def A (self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
A = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
A = self.image_processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
A = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
A = self.image_encoder(_lowerCAmelCase )["""last_hidden_state"""]
A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
A = torch.zeros_like(_lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:  # plain bool; the old "is not None" check was always true
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            mesh = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(mesh)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 258 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : List[str] = ["""input_ids""", """attention_mask"""]
a__ : int = None
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
__UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
__UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
__UpperCamelCase :str = add_prefix_space
__UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
__UpperCamelCase :Tuple = add_prefix_space
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._batch_encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
__UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
return tuple(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> List[int]:
__UpperCamelCase :str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
if len(__lowercase) > self.model_max_length:
__UpperCamelCase :Any = input_ids[-self.model_max_length :]
return input_ids
| 43 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Union[str, Any] ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
A = model
A = kwargs.get("""model_save_dir""" , _lowerCAmelCase )
A = kwargs.get("""latest_model_name""" , _lowerCAmelCase )
def __call__(self : Tuple , **_lowerCAmelCase : Optional[Any] ):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
@staticmethod
def A (_lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
A = """CPUExecutionProvider"""
return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : List[str] ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A = self.model_save_dir.joinpath(self.latest_model_name )
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A = self.model_save_dir.joinpath(_lowerCAmelCase )
if src_path.exists():
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
def A (self : Tuple , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : str , ):
if os.path.isfile(_lowerCAmelCase ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# saving model weights/files
self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Union[bool, str, None]] = None , _lowerCAmelCase : Optional[Union[str, None]] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional["ort.SessionOptions"] = None , **_lowerCAmelCase : Optional[int] , ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowerCAmelCase ):
A = OnnxRuntimeModel.load_model(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
A = Path(_lowerCAmelCase )
# load model from hub
else:
# download model
A = hf_hub_download(
repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , )
A = Path(_lowerCAmelCase ).parent
A = Path(_lowerCAmelCase ).name
A = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
return cls(model=_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : str , ):
A = None
if len(str(_lowerCAmelCase ).split("""@""" ) ) == 2:
A , A = model_id.split("""@""" )
return cls._from_pretrained(
model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
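

# Hypothetical usage sketch (illustrative; the local path and submodel name
# below are assumptions, not taken from this file):
#
#     unet = OnnxRuntimeModel.from_pretrained(
#         "./stable-diffusion-onnx/unet", provider="CPUExecutionProvider"
#     )
#     outputs = unet(sample=latents, timestep=t, encoder_hidden_states=embeds)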
| 258 | 0 |
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
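    # A few extra illustrative cases (added for clarity): '.' matches any single
    # character and 'x*' matches zero or more copies of 'x'.
    assert match_pattern("aab", "c*a*b")  # 'c*' matches zero characters
    assert match_pattern("dog", "d.g")  # '.' stands in for 'o'
    assert not match_pattern("dog", "d*g")  # 'd*g' cannot produce the 'o'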
| 44 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A (self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
A = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
A = VideoClassificationPipeline(model=_lowerCAmelCase , image_processor=_lowerCAmelCase , top_k=2 )
A = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def A (self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
for example in examples:
A = video_classifier(_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
] , )
@require_torch
def A (self : Optional[Any] ):
A = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
A = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
A = pipeline(
"""video-classification""" , model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , frame_sampling_rate=4 )
A = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
A = video_classifier(_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
A = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def A (self : List[Any] ):
pass
| 258 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """simple docstring"""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """simple docstring"""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position {result_ite}")
        print(f"Recursive search: {target} found at position {result_rec}")
    else:
        print('Not found')
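    # Non-interactive sanity check (illustrative addition, not in the original):
    # both search variants should agree on a known sorted list.
    sample = list(range(0, 102, 3))
    assert ite_ternary_search(sample, 42) == rec_ternary_search(0, len(sample) - 1, sample, 42) == 14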
| 258 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name: str) -> str:
    '''simple docstring'''
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            # the fused qkv matrix is split into separate query/key/value tensors
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 12_80
        config.intermediate_size = 51_20
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(F'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 46 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = CycleDiffusionPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A (self : int ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A = CLIPTextModel(_lowerCAmelCase )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A (self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=0 ):
A = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
A = image / 2 + 0.5
if str(_lowerCAmelCase ).startswith("""mps""" ):
A = torch.manual_seed(_lowerCAmelCase )
else:
A = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
A = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def A (self : Any ):
A = """cpu""" # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A (self : str ):
A = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowerCAmelCase , """half""" ):
A = module.half()
A = CycleDiffusionPipeline(**_lowerCAmelCase )
A = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
A = self.get_dummy_inputs(_lowerCAmelCase )
A = pipe(**_lowerCAmelCase )
A = output.images
A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A (self : Optional[int] ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def A (self : Optional[Any] ):
return super().test_inference_batch_single_identical()
@skip_mps
def A (self : Dict ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def A (self : Optional[Any] ):
return super().test_save_load_optional_components()
@skip_mps
def A (self : Optional[int] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def A (self : int ):
A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A = init_image.resize((512, 512) )
A = """CompVis/stable-diffusion-v1-4"""
A = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
A = CycleDiffusionPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
A = """A black colored car"""
A = """A blue colored car"""
A = torch.manual_seed(0 )
A = pipe(
prompt=_lowerCAmelCase , source_prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCAmelCase , output_type="""np""" , )
A = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 258 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( A__ ):
A__ = 'realm'
def __init__( self : List[str] , _a : Dict=3_0522 , _a : Optional[int]=768 , _a : str=128 , _a : Dict=12 , _a : Union[str, Any]=12 , _a : Tuple=8 , _a : Any=3072 , _a : List[str]="gelu_new" , _a : Dict=0.1 , _a : Optional[int]=0.1 , _a : Optional[int]=512 , _a : List[str]=2 , _a : List[str]=0.02 , _a : int=1e-12 , _a : List[Any]=256 , _a : Dict=10 , _a : Union[str, Any]=1e-3 , _a : List[Any]=5 , _a : Optional[int]=320 , _a : Optional[Any]=1335_3718 , _a : str=5000 , _a : List[str]=1 , _a : int=0 , _a : Optional[int]=2 , **_a : List[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
# Common config
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =retriever_proj_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =num_candidates
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =layer_norm_eps
# Reader config
_SCREAMING_SNAKE_CASE =span_hidden_size
_SCREAMING_SNAKE_CASE =max_span_width
_SCREAMING_SNAKE_CASE =reader_layer_norm_eps
_SCREAMING_SNAKE_CASE =reader_beam_size
_SCREAMING_SNAKE_CASE =reader_seq_len
# Retrieval config
_SCREAMING_SNAKE_CASE =num_block_records
_SCREAMING_SNAKE_CASE =searcher_beam_size
| 47 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
def __init__(self : Tuple , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str] ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A (self : Any , _lowerCAmelCase : str=None ):
A = {}
if top_k is not None:
A = top_k
return {}, {}, postprocess_params
def __call__(self : str , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : int ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : List[Any] ):
A = load_image(_lowerCAmelCase )
A = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A (self : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
A = self.model(**_lowerCAmelCase )
return model_outputs
def A (self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=5 ):
if top_k > self.model.config.num_labels:
A = self.model.config.num_labels
if self.framework == "pt":
A = model_outputs.logits.softmax(-1 )[0]
A , A = probs.topk(_lowerCAmelCase )
elif self.framework == "tf":
A = stable_softmax(model_outputs.logits , axis=-1 )[0]
A = tf.math.top_k(_lowerCAmelCase , k=_lowerCAmelCase )
A , A = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
A = scores.tolist()
A = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_lowerCAmelCase , _lowerCAmelCase )]
| 258 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = """resnet"""
lowerCamelCase_ : List[Any] = ["""basic""", """bottleneck"""]
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=64 , UpperCamelCase__=[256, 512, 1024, 2048] , UpperCamelCase__=[3, 4, 6, 3] , UpperCamelCase__="bottleneck" , UpperCamelCase__="relu" , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> str:
super().__init__(**UpperCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : List[str] = embedding_size
lowerCamelCase : Any = hidden_sizes
lowerCamelCase : List[str] = depths
lowerCamelCase : Optional[int] = layer_type
lowerCamelCase : Any = hidden_act
lowerCamelCase : Optional[Any] = downsample_in_first_stage
lowerCamelCase : List[str] = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase__ ) + 1 )]
lowerCamelCase , lowerCamelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : int = version.parse("""1.11""" )
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase ( self ) -> float:
return 1e-3
| 48 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """simple docstring"""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
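    # Worked example (illustrative addition): water at ~20 °C has a density of
    # roughly 998 kg/m^3 and a bulk modulus of about 2.15e9 Pa, which gives a
    # speed of sound near 1467 m/s.
    print(speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9))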
| 258 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip('/')  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/') != 1:
        msg = f'{olid} is not a valid Open Library olid'
        raise ValueError(msg)
    return requests.get(f'https://openlibrary.org/{new_olid}.json').json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'])['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ', '.join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
            book_summary = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
| 49 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BioGptTokenizer
__lowerCAmelCase = False
def A (self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def A (self : Tuple , _lowerCAmelCase : List[str] ):
A = """lower newer"""
A = """lower newer"""
return input_text, output_text
def A (self : List[Any] ):
A = BioGptTokenizer(self.vocab_file , self.merges_file )
A = """lower"""
A = ["""low""", """er</w>"""]
A = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
A = tokens + ["""<unk>"""]
A = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@slow
def A (self : Union[str, Any] ):
A = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 258 | 0 |
from manim import *
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Dict ) -> List[str]:
lowerCamelCase__ : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Any = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : Any = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : int = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : List[Any] = Text('CPU' , font_size=24 )
lowerCamelCase__ : Union[str, Any] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
lowerCamelCase__ : List[Any] = [mem.copy() for i in range(1 )]
lowerCamelCase__ : str = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : int = Text('GPU' , font_size=24 )
lowerCamelCase__ : Optional[Any] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.align_to(UpperCAmelCase , UpperCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Tuple = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase__ : Dict = Text('Model' , font_size=24 )
lowerCamelCase__ : Dict = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , )
lowerCamelCase__ : Dict = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
lowerCamelCase__ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ : str = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=2.5 ) , Write(UpperCAmelCase ) , Write(UpperCAmelCase ) )
self.add(UpperCAmelCase )
lowerCamelCase__ : int = []
lowerCamelCase__ : str = []
lowerCamelCase__ : Dict = []
for i, rect in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.7 )
cpu_target.move_to(UpperCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ : Dict = 0.4_6 / 4
lowerCamelCase__ : List[str] = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=UpperCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase , buff=0.0 )
cpu_targs.append(UpperCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase ) )
second_animations.append(MoveToTarget(UpperCAmelCase , run_time=1.5 ) )
self.play(*UpperCAmelCase )
self.play(*UpperCAmelCase )
self.wait()
| 50 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """simple docstring"""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
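    # ReLU applies elementwise max(0, x), so it works on arrays of any shape
    # (illustrative addition):
    print(relu(np.array([[-2.5, 3.0], [0.0, -0.1]])))  # --> [[0. 3.] [0. 0.]]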
| 258 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor` as a list, preferring static dimensions and
    falling back to dynamic scalars where the static shape is unknown."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """tf.nn.softmax with a tiny additive shift, which works around an XLA
    miscompilation of the bare op without materially changing the output."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon: float = 1e-5, axis: int = -1):
    """Functional layer normalization built from tf.nn.moments and
    tf.nn.batch_normalization."""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim: int = 0, end_dim: int = -1):
    """Replicates the behaviour of torch.flatten in TF."""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a {0, 1} attention mask into an additive mask of large negative values."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Assert that every id in `tensor` is a valid row index for an embedding of size `embed_dim`."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save `data` as attribute `name` on `group`, chunking it to stay under
    HDF5's object header size limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load attribute `name` from `group`, reassembling any chunks written by
    the saver above."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand rank-1 tensors to rank 2, mirroring what Keras does to labels."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
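# Quick usage sketch (my addition, not part of the original module), eager mode:
#   x = tf.random.uniform((2, 3, 5))
#   shape_list(x)               # -> [2, 3, 5]: fully static dims come back as ints
#   stable_softmax(x, axis=-1)  # rows sum to ~1; the +1e-9 shift keeps XLA happy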
| 51 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=13 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : str=[1, 2, 1] , _lowerCAmelCase : List[Any]=[2, 2, 4] , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Any=2.0 , _lowerCAmelCase : Any=True , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=10 , _lowerCAmelCase : int=8 , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = embed_dim
A = depths
A = num_heads
A = window_size
A = mlp_ratio
A = qkv_bias
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = drop_path_rate
A = hidden_act
A = use_absolute_embeddings
A = patch_norm
A = layer_norm_eps
A = initializer_range
A = is_training
A = scope
A = use_labels
A = type_sequence_label_size
A = encoder_stride
def A (self : Dict ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def A (self : Optional[Any] ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A (self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] ):
A = SwinvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase )
A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A (self : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
A = SwinvaForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A = 1
A = SwinvaForMaskedImageModeling(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A (self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ):
A = self.type_sequence_label_size
A = SwinvaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A (self : Union[str, Any] ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__lowerCAmelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Any ):
A = SwinvaModelTester(self )
A = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )
def A (self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A (self : int ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def A (self : Dict ):
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def A (self : Optional[int] ):
pass
def A (self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def A (self : Optional[int] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def A (self : int ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
A = True
A = False
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
A = len(self.model_tester.depths )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = config.window_size**2
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
A = len(_lowerCAmelCase )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
A = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
A = 2
self.assertEqual(out_len + added_hidden_states , len(_lowerCAmelCase ) )
A = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def A (self : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ):
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
A = outputs.hidden_states
A = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swinv2 has a different seq_length
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
A = outputs.reshaped_hidden_states
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
A , A , A , A = reshaped_hidden_states[0].shape
A = (
reshaped_hidden_states[0].view(_lowerCAmelCase , _lowerCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A (self : Tuple ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A (self : List[str] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = 3
A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
def A (self : Optional[int] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def A (self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def A (self : Optional[Any] ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = SwinvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def A (self : Optional[Any] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
A = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A (self : List[str] ):
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def A (self : List[str] ):
A = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
_lowerCAmelCase )
A = self.default_image_processor
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
A = model(**_lowerCAmelCase )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
A = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
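# Worked numbers (my addition) for the model tester defaults above: image_size=32
# and patch_size=2 give (32 // 2) ** 2 = 256 patches; len(depths) == 3 stages
# merge them by 4 ** 2 = 16, so expected_seq_len = 256 // 16 = 16 and
# expected_dim = embed_dim * 2 ** 2 = 16 * 4 = 64.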
| 258 | 0 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count perimeters up to `limit` achievable by exactly
    one integer right triangle, generating primitive triples with Euclid's
    formula and marking all of their multiples."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f"""{solution() = }""")
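# Background (my addition): Euclid's formula yields every primitive triple from
# coprime m > n of opposite parity: a = m^2 - n^2, b = 2mn, c = m^2 + n^2,
# with perimeter 2m(m + n).
def euclid_triple(m: int, n: int) -> tuple[int, int, int]:
    """Primitive Pythagorean triple from Euclid's formula (illustration only)."""
    return (m * m - n * n, 2 * m * n, m * m + n * n)

assert euclid_triple(2, 1) == (3, 4, 5)  # perimeter 12, the smallest triple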
| 52 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity: the Jaro score boosted by up to four characters
    of common prefix."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
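# Sanity checks (my addition): identical strings score 1.0, fully disjoint
# strings score 0.0, and 'hello' vs 'world' lands near 0.4667.
assert abs(jaro_winkler("hello", "hello") - 1.0) < 1e-9
assert jaro_winkler("abc", "xyz") == 0.0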
| 258 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Apply one step of Conway's Game of Life to the grid `cells`."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render `frames` successive generations as greyscale PIL images."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
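# Minimal check (my addition): one generation flips the vertical blinker into a
# horizontal one, the classic period-2 oscillator.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]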
| 53 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_lowerCamelCase : List[str] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_lowerCamelCase : List[Any] = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_lowerCamelCase : Dict = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 258 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    """Apply the PATTERNS substitutions in order to map a TF variable name to
    its Hugging Face state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the renamed TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> dict:
    """Read all non-optimizer variables from a TF checkpoint into a name -> array dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """End-to-end conversion: tokenizer, config, weights, then save in HF format."""
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
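# Shape of the rename pass (my addition): each (pegasus, hf) pair in PATTERNS
# is a plain substring substitution applied in order, so for example
#   rename_state_dict_key("encoder/memory_attention/output_proj/kernel")
# becomes "encoder.encoder_attn.out_proj.weight".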
| 54 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
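# The core round trip (my addition): each pickled DataFrame becomes one Arrow
# table, which `datasets` then streams downstream. Roughly:
#   pd.DataFrame({"a": [1, 2]}).to_pickle("tiny.pkl")
#   pa.Table.from_pandas(pd.read_pickle("tiny.pkl"))  # -> Arrow table, column "a"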
| 258 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = XGLMTokenizer
_lowerCamelCase = XGLMTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def snake_case ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = XGLMTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = "<pad>"
lowerCamelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(UpperCamelCase ) , 1008 )
def snake_case ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = XGLMTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowerCamelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def snake_case ( self ):
"""simple docstring"""
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def snake_case ( self ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase , f.name )
lowerCamelCase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase )
lowerCamelCase_ = pickle.dumps(UpperCamelCase )
pickle.loads(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = "I was born in 92000, and this is falsé."
lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
lowerCamelCase_ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowerCamelCase_ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(UpperCamelCase )
lowerCamelCase_ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = "Hello World!"
lowerCamelCase_ = [2, 3_1227, 4447, 35]
self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) )
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
lowerCamelCase_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) )
@slow
def snake_case ( self ):
"""simple docstring"""
# fmt: off
lowerCamelCase_ = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="facebook/xglm-564M" , padding=UpperCamelCase , )
| 55 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times (spaces and case are ignored)."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check without Counter, via an explicit frequency dict."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    """Time both implementations on `check_str` (read back as a __main__ global)."""
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
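# Spot checks (my addition): "Momo" -> True (m=2, o=2 after lowercasing);
# "aaab" -> False (both 'a' (3 times) and 'b' (once) occur an odd number of times).
assert can_string_be_rearranged_as_palindrome_counter("Momo") is True
assert can_string_be_rearranged_as_palindrome_counter("aaab") is False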
| 258 | 0 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
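# Roughly equivalent shell one-liner (my addition), assuming a `main` branch
# and GNU grep:
#   git diff --diff-filter=d --name-only $(git merge-base main HEAD) \
#     | grep -E '^(utils|src|tests|examples).*\.py$' | tr '\n' ' '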
| 56 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
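# Worked numbers (my addition) for the 24 kHz defaults above: hop_length =
# prod([8, 5, 4, 2]) = 320 samples, so frame_rate = ceil(24000 / 320) = 75 Hz,
# and with target_bandwidths[-1] == 24.0 the codec keeps
# int(1000 * 24.0 // (75 * 10)) = 32 quantizers.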
| 258 | 0 |
"""simple docstring"""
import os
def solution() -> int:
    """Project Euler 22: sum of (alphabetical position * alphabetical value)
    over all names in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
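# Worked example (my addition, from the Project Euler 22 statement): COLIN
# scores 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list,
# contributing 938 * 53 = 49714 to the total.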
| 57 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
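# Minimal sketch (my addition) of the idea behind _LazyModule, using PEP 562's
# module-level __getattr__ -- nothing heavy is imported until first attribute
# access:
#   import importlib
#   def __getattr__(name):
#       if name in _import_structure["configuration_audio_spectrogram_transformer"]:
#           module = importlib.import_module(".configuration_audio_spectrogram_transformer", __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")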
| 258 | 0 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
lowercase_ = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = CamembertTokenizer
UpperCamelCase = CamembertTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def snake_case_( self ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
_SCREAMING_SNAKE_CASE = CamembertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = """<pad>"""
_SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1004 )
def snake_case_( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = CamembertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = tokenizer.encode(A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A , add_special_tokens=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
def snake_case_( self ) -> Any:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A , add_special_tokens=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
@slow
def snake_case_( self ) -> Optional[int]:
# fmt: off
_SCREAMING_SNAKE_CASE = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_SCREAMING_SNAKE_CASE = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=A , )
| 58 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : str ):
A = """ylacombe/bark-small"""
A = tempfile.mkdtemp()
A = """en_speaker_1"""
A = """This is a test string"""
A = """speaker_embeddings_path.json"""
A = """speaker_embeddings"""
def A (self : Optional[Any] , **_lowerCAmelCase : Any ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase )
def A (self : Dict ):
shutil.rmtree(self.tmpdirname )
def A (self : Optional[Any] ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
A = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def A (self : int ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def A (self : Union[str, Any] ):
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
A = 35
A = 2
A = 8
A = {
"""semantic_prompt""": np.ones(_lowerCAmelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
A = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_lowerCAmelCase , **_lowerCAmelCase )
A = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
A = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
A = processor(text=self.input_string , voice_preset=self.voice_preset )
def A (self : str ):
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=_lowerCAmelCase )
A = processor(text=self.input_string )
A = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 258 | 0 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Deterministic trial-division primality test using the 6k +/- 1 wheel."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 59 |
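A quick usage sketch of the 6k +/- 1 check above (the one-liner and its printed output are illustrative, not part of the original test file):

# List all primes below 50; after handling 2 and 3 explicitly, the loop only
# needs to try divisors of the form 6k - 1 and 6k + 1 up to sqrt(n).
primes_below_50 = [n for n in range(50) if is_prime(n)]
print(primes_below_50)  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]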
"""Haversine great-circle distance between two points on Earth."""
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid constants, in metres
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the great-circle distance in metres between two
    (latitude, longitude) points given in decimal degrees."""
    # Convert geodetic latitudes to reduced latitudes on the WGS84 ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Equation: https://en.wikipedia.org/wiki/Haversine_formula#Formulation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 258 | 0 |
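A quick sanity check of the function above. The coordinates and the rounded figure are illustrative; the exact result depends on the radius constant used:

# San Francisco (37.77, -122.42) to New York (40.71, -74.01); with
# RADIUS = 6378137 m this comes out to roughly 4,100 km.
distance_m = haversine_distance(37.77, -122.42, 40.71, -74.01)
print(f"{distance_m / 1000:.0f} km")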
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : str = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : Union[str, Any] = {
'''Salesforce/codegen-350M-mono''': 2_048,
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = CodeGenTokenizer
def __init__( self : Optional[int] , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple="<|endoftext|>" , UpperCamelCase_ : int="<|endoftext|>" , UpperCamelCase_ : Tuple="<|endoftext|>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Dict , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop('''add_bos_token''' , UpperCamelCase_ ):
lowerCAmelCase : str = kwargs.pop('''name_or_path''' , '''''' )
raise ValueError(
'''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
'''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'''
F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'''
'''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
''' so that the fast tokenizer works correctly.''' )
lowerCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
lowerCAmelCase : List[str] = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCAmelCase : Optional[Any] = add_prefix_space
lowerCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[int] = add_prefix_space
def lowerCamelCase__ ( self : int , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Any ):
lowerCAmelCase : Dict = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : int ):
lowerCAmelCase : List[str] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
lowerCAmelCase : Tuple = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[List[str]] = None , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : List[str] = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
lowerCAmelCase : str = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ):
def find_re(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
lowerCAmelCase : Union[str, Any] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
lowerCAmelCase : Optional[Any] = list(re.finditer('''^print''' , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
lowerCAmelCase : int = completion[: prints[1].start()]
lowerCAmelCase : List[str] = list(re.finditer('''^def''' , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
lowerCAmelCase : Union[str, Any] = completion[: defs[1].start()]
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Tuple = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
| 60 |
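To illustrate the truncation logic in `truncate` above, here is a standalone sketch of the second-`def` cut (the sample completion is made up; the real method additionally cuts at the second top-level `print` and at the earliest match of any caller-supplied pattern):

import re

# A model completion that runs past the intended function into a second one.
completion = (
    "def add(a, b):\n"
    "    return a + b\n"
    "print(add(1, 2))\n"
    "def sub(a, b):\n"
    "    return a - b\n"
)

# Keep everything up to the start of the second top-level `def`.
defs = list(re.finditer("^def", completion, re.MULTILINE))
if len(defs) > 1:
    completion = completion[: defs[1].start()]
print(completion)
# def add(a, b):
#     return a + b
# print(add(1, 2))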
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 258 | 0 |
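The `get_test_info` helpers exercised above are internal to the transformers repo. As a rough, purely hypothetical illustration of the idea (not the actual implementation), a test-class to tester-class mapping can be recovered by scanning a test module's AST for `*ModelTester` instantiations inside `*ModelTest` classes:

import ast

def test_to_tester_mapping(source: str) -> dict:
    """Map each *ModelTest class to the *ModelTester it instantiates."""
    mapping = {}
    for node in ast.parse(source).body:
        if isinstance(node, ast.ClassDef) and node.name.endswith("ModelTest"):
            for call in ast.walk(node):
                if (
                    isinstance(call, ast.Call)
                    and isinstance(call.func, ast.Name)
                    and call.func.id.endswith("ModelTester")
                ):
                    mapping[node.name] = call.func.id
    return mapping

src = """
class BertModelTester:
    pass

class BertModelTest(unittest.TestCase):
    def setUp(self):
        self.model_tester = BertModelTester(self)
"""
print(test_to_tester_mapping(src))  # {'BertModelTest': 'BertModelTester'}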