"""simple docstring"""
def snake_case_ ( A_ : str ):
'''simple docstring'''
assert column_title.isupper()
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Tuple = len(A_ ) - 1
_lowerCamelCase : Dict = 0
while index >= 0:
_lowerCamelCase : str = (ord(column_title[index] ) - 64) * pow(26, A_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
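# Usage sketch (illustrative, not from the original file): excel_title_to_column("A")
# evaluates to 1 and excel_title_to_column("AB") to 28, since "AB" = 1 * 26 + 2.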
"""simple docstring"""
import unittest
import numpy as np
def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
if shape_a[0] != shape_b[0]:
_lowerCamelCase : Tuple = (
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(A_ )
if shape_b[1] != shape_c[1]:
_lowerCamelCase : Tuple = (
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(A_ )
_lowerCamelCase : List[str] = pseudo_inv
if a_inv is None:
try:
_lowerCamelCase : Any = np.linalg.inv(A_ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] )
_lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] )
_lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase )
self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : int = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
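# Background note (not from the original file): the first test relies on the determinant
# identity det([[A, B], [B.T, C]]) == det(A) * det(C - B.T @ inv(A) @ B), i.e. the
# determinant of the full block matrix equals det(A) times the determinant of the Schur
# complement of A.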
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
lowerCAmelCase__ = 6_3_7_8_1_3_7.0
lowerCAmelCase__ = 6_3_5_6_7_5_2.3_1_4_2_4_5
lowerCAmelCase__ = 6378137
def snake_case_ ( A_ : float, A_ : float, A_ : float, A_ : float ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = (AXIS_A - AXIS_B) / AXIS_A
_lowerCamelCase : List[Any] = atan((1 - flattening) * tan(radians(A_ ) ) )
_lowerCamelCase : List[str] = atan((1 - flattening) * tan(radians(A_ ) ) )
_lowerCamelCase : List[str] = radians(A_ )
_lowerCamelCase : Tuple = radians(A_ )
# Equation
_lowerCamelCase : str = sin((phi_a - phi_a) / 2 )
_lowerCamelCase : Optional[Any] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_lowerCamelCase : List[str] = sqrt(sin_sq_phi + (cos(A_ ) * cos(A_ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
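# Usage sketch (illustrative, not from the original file): coordinates are decimal
# degrees and the result is in metres, e.g.
#   haversine_distance(37.774856, -122.424227, 61.216520, -149.905965)
# gives the approximate distance between San Francisco and Anchorage.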
"""simple docstring"""
def snake_case_ ( A_ : list[int], A_ : str ):
'''simple docstring'''
_lowerCamelCase : Tuple = int(A_ )
# Initialize Result
_lowerCamelCase : Dict = []
# Traverse through all denomination
for denomination in reversed(A_ ):
# Find denominations
while int(A_ ) >= int(A_ ):
total_value -= int(A_ )
answer.append(A_ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
lowerCAmelCase__ = []
lowerCAmelCase__ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
lowerCAmelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
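# Usage sketch (illustrative, not from the original file):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2], greedily taking the largest
# denomination that still fits at each step.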
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __snake_case ( _lowercase):
def __init__( self : List[Any] , __lowerCAmelCase : int = 1_0_1 ):
"""simple docstring"""
_lowerCamelCase : Dict = length
def __len__( self : Dict ):
"""simple docstring"""
return self.length
def __getitem__( self : List[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
return i
class __snake_case :
def __call__( self : str , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return {"input_ids": torch.tensor(__lowerCAmelCase ), "labels": torch.tensor(__lowerCAmelCase )}
class __snake_case ( nn.Module):
def __init__( self : str ):
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_lowerCamelCase : List[Any] = nn.Linear(1_2_0 , 8_0 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str=None ):
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class __snake_case ( _lowercase):
@require_torch_neuroncore
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = f'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
_lowerCamelCase : int = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Optional[int] = f'''--output_dir {output_dir}'''.split()
_lowerCamelCase : int = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __snake_case ( _lowercase):
@require_torch_multi_gpu
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = f'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
_lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Tuple = f'''--output_dir {output_dir}'''.split()
_lowerCamelCase : str = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(__lowerCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowerCAmelCase__ = HfArgumentParser((TrainingArguments,))
lowerCAmelCase__ = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
lowerCAmelCase__ = DummyDataset(dataset_length)
def snake_case_ ( A_ : EvalPrediction ):
'''simple docstring'''
_lowerCamelCase : str = list(range(len(A_ ) ) )
_lowerCamelCase : List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
lowerCAmelCase__ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCAmelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase__ = 2
lowerCAmelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase__ = None
"""simple docstring"""
def snake_case_ ( A_ : int = 2_00_00_00 ):
'''simple docstring'''
_lowerCamelCase : int = [0 for i in range(n + 1 )]
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 1
for i in range(2, int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i, n + 1, A_ ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = 0
for i in range(A_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import re
def snake_case_ ( A_ : str ):
'''simple docstring'''
_lowerCamelCase : int = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(A_, A_ ) )
if __name__ == "__main__":
lowerCAmelCase__ = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
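# Usage sketch (illustrative, not from the original file): the pattern accepts local and
# international prefixes, so is_sri_lankan_phone_number("0094702343221") and
# is_sri_lankan_phone_number("+94771234567") return True, while a number with an invalid
# operator digit (e.g. "0094792343221") returns False.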
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def snake_case_ ( A_ : Any ):
'''simple docstring'''
_lowerCamelCase : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ )
_lowerCamelCase : str = emb.weight.data
return lin_layer
def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model''']
remove_ignore_keys_(A_ )
_lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ )
if mbart_aa and finetuned:
_lowerCamelCase : Any = '''relu'''
_lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight''']
_lowerCamelCase : Any = MBartForConditionalGeneration(A_ )
model.model.load_state_dict(A_ )
if finetuned:
_lowerCamelCase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
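# Example invocation (sketch; the script file name and paths are illustrative, not from
# the original repository):
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned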
"""simple docstring"""
from math import factorial, radians
def snake_case_ ( A_ : float, A_ : int = 18, A_ : int = 10 ):
'''simple docstring'''
_lowerCamelCase : Any = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
_lowerCamelCase : Tuple = radians(A_ )
_lowerCamelCase : int = angle_in_radians
_lowerCamelCase : Any = 3
_lowerCamelCase : str = -1
for _ in range(A_ ):
result += (b * (angle_in_radians**a)) / factorial(A_ )
_lowerCamelCase : Optional[Any] = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(A_, A_ )
if __name__ == "__main__":
__import__('''doctest''').testmod()
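# Usage sketch (illustrative, not from the original file): with the default 18 series
# terms the approximation is effectively exact for common angles, e.g. sin(30) returns
# 0.5 and sin(90) returns 1.0.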
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = current_set.copy()
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
if len(A_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_lowerCamelCase : Dict = len(A_ ) + 1
if any(len(A_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(A_, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(A_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowerCamelCase : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
_lowerCamelCase : str = data_set.copy()
_lowerCamelCase : List[Any] = []
for row_index, row in enumerate(A_ ):
if 0 not in row:
_lowerCamelCase : Union[str, Any] = data_set.pop(A_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, A_ )
_lowerCamelCase : List[str] = data_set.copy()
_lowerCamelCase : int = simplify(A_ )
_lowerCamelCase : int = simplified[::-1]
_lowerCamelCase : list = []
for row in simplified:
_lowerCamelCase : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A_ ) == 0:
solutions.append(0 )
continue
_lowerCamelCase : Tuple = temp_row[1::]
_lowerCamelCase : Tuple = temp_row[::-1]
for column_index, column in enumerate(A_ ):
current_solution -= column * solutions[column_index]
solutions.append(A_ )
_lowerCamelCase : Optional[int] = []
for item in solutions:
final.append(float(round(A_, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def snake_case_ ( A_ : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = model.config
_lowerCamelCase : List[Any] = DonutSwinConfig(
image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=1_28, )
_lowerCamelCase : int = MBartConfig(
is_decoder=A_, is_encoder_decoder=A_, add_cross_attention=A_, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
model.decoder.tokenizer ), scale_embedding=A_, add_final_layer_norm=A_, )
return encoder_config, decoder_config
def snake_case_ ( A_ : Any ):
'''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase : List[str] = name.replace('''encoder.model''', '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase : List[Any] = name.replace('''decoder.model''', '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase : int = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase : str = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase : int = name.replace('''attn.proj''', '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase : int = name.replace('''attn''', '''attention.self''' )
if "norm1" in name:
_lowerCamelCase : List[str] = name.replace('''norm1''', '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase : List[Any] = name.replace('''norm2''', '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase : Union[str, Any] = name.replace('''mlp.fc1''', '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase : str = name.replace('''mlp.fc2''', '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase : List[Any] = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase : Union[str, Any] = '''encoder.layernorm.bias'''
return name
def snake_case_ ( A_ : Tuple, A_ : Dict ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Tuple = orig_state_dict.pop(A_ )
if "qkv" in key:
_lowerCamelCase : List[Any] = key.split('''.''' )
_lowerCamelCase : str = int(key_split[3] )
_lowerCamelCase : List[Any] = int(key_split[5] )
_lowerCamelCase : List[str] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase : int = val[:dim, :]
_lowerCamelCase : Union[str, Any] = val[dim : dim * 2, :]
_lowerCamelCase : int = val[-dim:, :]
else:
_lowerCamelCase : int = val[:dim]
_lowerCamelCase : int = val[dim : dim * 2]
_lowerCamelCase : Any = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowerCamelCase : Optional[Any] = val
return orig_state_dict
def snake_case_ ( A_ : Optional[Any], A_ : Dict=None, A_ : Dict=False ):
'''simple docstring'''
_lowerCamelCase : Dict = DonutModel.from_pretrained(A_ ).eval()
# load HuggingFace model
_lowerCamelCase , _lowerCamelCase : Optional[int] = get_configs(A_ )
_lowerCamelCase : str = DonutSwinModel(A_ )
_lowerCamelCase : Optional[Any] = MBartForCausalLM(A_ )
_lowerCamelCase : int = VisionEncoderDecoderModel(encoder=A_, decoder=A_ )
model.eval()
_lowerCamelCase : Optional[Any] = original_model.state_dict()
_lowerCamelCase : List[str] = convert_state_dict(A_, A_ )
model.load_state_dict(A_ )
# verify results on scanned document
_lowerCamelCase : Union[str, Any] = load_dataset('''hf-internal-testing/example-documents''' )
_lowerCamelCase : int = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_lowerCamelCase : List[str] = XLMRobertaTokenizerFast.from_pretrained(A_, from_slow=A_ )
_lowerCamelCase : List[Any] = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] )
_lowerCamelCase : str = DonutProcessor(A_, A_ )
_lowerCamelCase : List[Any] = processor(A_, return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCamelCase : Optional[int] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase : Tuple = '''When is the coffee break?'''
_lowerCamelCase : Any = task_prompt.replace('''{user_input}''', A_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCamelCase : List[Any] = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCamelCase : List[Any] = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_lowerCamelCase : str = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCamelCase : Tuple = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCamelCase : Optional[int] = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_lowerCamelCase : Optional[Any] = original_model.decoder.tokenizer(A_, add_special_tokens=A_, return_tensors='''pt''' )[
'''input_ids'''
]
_lowerCamelCase : Optional[Any] = original_model.encoder.model.patch_embed(A_ )
_lowerCamelCase , _lowerCamelCase : int = model.encoder.embeddings(A_ )
assert torch.allclose(A_, A_, atol=1E-3 )
# verify encoder hidden states
_lowerCamelCase : Union[str, Any] = original_model.encoder(A_ )
_lowerCamelCase : Tuple = model.encoder(A_ ).last_hidden_state
assert torch.allclose(A_, A_, atol=1E-2 )
# verify decoder hidden states
_lowerCamelCase : Any = original_model(A_, A_, A_ ).logits
_lowerCamelCase : Dict = model(A_, decoder_input_ids=A_ ).logits
assert torch.allclose(A_, A_, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
processor.save_pretrained(A_ )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1], commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1], commit_message='''Update model''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "Speech2TextFeatureExtractor"
snake_case__ : Union[str, Any] = "Speech2TextTokenizer"
def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : str = False
def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_lowerCamelCase : str = kwargs.pop('''raw_speech''' )
else:
_lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : List[Any] = args[0]
_lowerCamelCase : int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
_lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase : List[str] = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@contextmanager
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Any = self.tokenizer
yield
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : Tuple = False
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __snake_case ( _lowercase):
snake_case__ : Any = DistilBertTokenizer
snake_case__ : Optional[Any] = DistilBertTokenizerFast
snake_case__ : int = True
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
_lowerCamelCase : int = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
_lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
def snake_case_ ( A_ : Tuple, A_ : Tuple, A_ : Any, A_ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : int = [False] * len(A_ )
_lowerCamelCase : Union[str, Any] = []
queue.append(A_ )
_lowerCamelCase : str = True
while queue:
_lowerCamelCase : List[Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(A_ )
_lowerCamelCase : int = True
_lowerCamelCase : Union[str, Any] = u
return visited[t]
def snake_case_ ( A_ : Union[str, Any], A_ : str, A_ : int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = [-1] * (len(A_ ))
_lowerCamelCase : Optional[Any] = 0
while bfs(A_, A_, A_, A_ ):
_lowerCamelCase : Optional[Any] = float('''Inf''' )
_lowerCamelCase : Union[str, Any] = sink
while s != source:
# Find the minimum value in select path
_lowerCamelCase : Union[str, Any] = min(A_, graph[parent[s]][s] )
_lowerCamelCase : List[str] = parent[s]
max_flow += path_flow
_lowerCamelCase : List[str] = sink
while v != source:
_lowerCamelCase : Union[str, Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_lowerCamelCase : Optional[Any] = parent[v]
return max_flow
lowerCAmelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase__ , lowerCAmelCase__ = 0, 5
print(ford_fulkerson(graph, source, sink))
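# Note (not from the original file): for the classic 6-node capacity matrix above, the
# script should print a maximum flow of 23.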
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
lowerCAmelCase__ = {'''bert_for_seq_generation''': 512}
class __snake_case ( _lowercase):
snake_case__ : Optional[Any] = VOCAB_FILES_NAMES
snake_case__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : List[int] = []
snake_case__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : List[str]="<unk>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : str="<::::>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : int , ):
"""simple docstring"""
_lowerCamelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = vocab_file
_lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Any = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Union[str, Any] , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
return self.sp_model.piece_to_id(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Dict = self.sp_model.IdToPiece(__lowerCAmelCase )
return token
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Union[str, Any] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_lowerCamelCase : Union[str, Any] = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
_lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case_ ( A_ : float = 0.1 ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 3
_lowerCamelCase : List[str] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ):
primes += is_prime(A_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
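# Note (not from the original file): this appears to be Project Euler problem 58;
# solution() returns the side length of the square spiral at which the ratio of primes
# along both diagonals first drops below the given ratio (10% by default).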
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __snake_case :
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_lowerCamelCase : str = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : List[str] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_lowerCamelCase : Any = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_lowerCamelCase : str = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : Tuple = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : List[str] = inputs['''prompt''']
_lowerCamelCase : Tuple = inputs['''generator''']
_lowerCamelCase : Any = inputs['''num_inference_steps''']
_lowerCamelCase : Optional[int] = inputs['''output_type''']
if "image" in inputs:
_lowerCamelCase : List[str] = inputs['''image''']
else:
_lowerCamelCase : Optional[Any] = None
if "mask_image" in inputs:
_lowerCamelCase : Any = inputs['''mask_image''']
else:
_lowerCamelCase : List[Any] = None
if "original_image" in inputs:
_lowerCamelCase : Any = inputs['''original_image''']
else:
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase , _lowerCamelCase : Tuple = pipe.encode_prompt(__lowerCAmelCase )
# inputs with prompt converted to embeddings
_lowerCamelCase : Optional[Any] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_lowerCamelCase : int = image
if mask_image is not None:
_lowerCamelCase : Union[str, Any] = mask_image
if original_image is not None:
_lowerCamelCase : List[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Any = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
_lowerCamelCase : Tuple = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inputs['''generator''']
_lowerCamelCase : List[Any] = inputs['''num_inference_steps''']
_lowerCamelCase : Tuple = inputs['''output_type''']
# inputs with prompt converted to embeddings
_lowerCamelCase : Any = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_lowerCamelCase : Optional[int] = image
if mask_image is not None:
_lowerCamelCase : List[str] = mask_image
if original_image is not None:
_lowerCamelCase : int = original_image
_lowerCamelCase : Any = pipe_loaded(**__lowerCAmelCase )[0]
_lowerCamelCase : str = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_dummy_components()
_lowerCamelCase : Optional[Any] = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : Any = pipe(**__lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = self.pipeline_class.from_pretrained(__lowerCAmelCase )
pipe_loaded.to(__lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_lowerCamelCase : str = self.get_dummy_inputs(__lowerCAmelCase )
_lowerCamelCase : List[str] = pipe_loaded(**__lowerCAmelCase )[0]
_lowerCamelCase : List[str] = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max()
self.assertLess(__lowerCAmelCase , 1E-4 )
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : int = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase )
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
_lowerCamelCase : int = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
_lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
_lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = -1
_lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n"
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
snake_case__ : int = "retribert"
def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : int = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = share_encoders
_lowerCamelCase : Optional[Any] = projection_dim
| 72 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : list[int], A_ : str ):
'''simple docstring'''
_lowerCamelCase : Tuple = int(A_ )
# Initialize Result
_lowerCamelCase : Dict = []
# Traverse through all denominations
for denomination in reversed(A_ ):
# Find denominations
while int(A_ ) >= int(A_ ):
total_value -= int(A_ )
answer.append(A_ ) # Append the chosen denomination to the answer list
return answer
# Driver Code
if __name__ == "__main__":
lowerCAmelCase__ = []
lowerCAmelCase__ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
lowerCAmelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
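# Worked example (an illustrative trace of the intended greedy routine, not part of the
# original driver): with the default denominations [1, 2, 5, 10, 20, 50, 100, 500, 2000],
# making change for 987 proceeds from the largest denomination downwards:
#   987 -> one 500 (487 left) -> four 100s (87 left) -> one 50 (37 left)
#       -> one 20 (17 left) -> one 10 (7 left) -> one 5 (2 left) -> one 2 (0 left)
# so the printed result is: 500 100 100 100 100 50 20 10 5 2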
| 72 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = min_size
_lowerCamelCase : Any = max_size
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = output.encoder_hidden_states
_lowerCamelCase : Tuple = output.pixel_decoder_hidden_states
_lowerCamelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(result : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case__ : Any = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MaskFormerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCamelCase : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(),
}
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1E-4
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : int = model(**__lowerCAmelCase )
_lowerCamelCase : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : List[str] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : str = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[str] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase )
_lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']]
_lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 72 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __snake_case :
def __init__( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=1_3 , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Tuple=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Any=5_1_2 , __lowerCAmelCase : List[str]=1_6 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=None , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : Optional[Any] = is_training
_lowerCamelCase : Optional[int] = use_token_type_ids
_lowerCamelCase : str = use_labels
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : int = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_size
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Union[str, Any] = num_labels
_lowerCamelCase : Any = num_choices
_lowerCamelCase : str = scope
_lowerCamelCase : List[Any] = self.vocab_size - 1
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : str = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Optional[int] = None
if self.use_labels:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , *__lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = OpenAIGPTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_lowerCamelCase : Dict = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , *__lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = OpenAIGPTLMHeadModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , *__lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Any = OpenAIGPTDoubleHeadsModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , *__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.num_labels
_lowerCamelCase : int = OpenAIGPTForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Tuple = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = config_and_inputs
_lowerCamelCase : str = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , _lowercase , unittest.TestCase):
snake_case__ : str = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case__ : str = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case__ : str = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict=False ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_lowerCamelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase , )
_lowerCamelCase : Any = inputs_dict['''labels''']
_lowerCamelCase : Optional[int] = inputs_dict['''labels''']
_lowerCamelCase : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCAmelCase , )
_lowerCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : int = OpenAIGPTModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , n_embd=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = OpenAIGPTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=__lowerCAmelCase ) # the president is
_lowerCamelCase : List[str] = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_lowerCamelCase : int = model.generate(__lowerCAmelCase , do_sample=__lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , __lowerCAmelCase )
| 72 |
"""simple docstring"""
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def snake_case_ ( A_ : dict, A_ : int, A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[str] = set()
# keep track of all the paths to be checked
_lowerCamelCase : str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_lowerCamelCase : str = queue.pop(0 )
# get the last node from the path
_lowerCamelCase : List[Any] = path[-1]
if node not in explored:
_lowerCamelCase : Union[str, Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_lowerCamelCase : Union[str, Any] = list(A_ )
new_path.append(A_ )
queue.append(A_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A_ )
# in case there's no path between the 2 nodes
return []
def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_lowerCamelCase : Optional[int] = [start]
_lowerCamelCase : int = set(A_ )
# Keep tabs on distances from the `start` node.
_lowerCamelCase : int = {start: 0, target: -1}
while queue:
_lowerCamelCase : Optional[Any] = queue.pop(0 )
if node == target:
_lowerCamelCase : Any = (
dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A_ )
queue.append(A_ )
_lowerCamelCase : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
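# Worked trace (illustrative, following the breadth-first path search defined above)
# for start "G" and goal "D" over `demo_graph`:
#   queue: [["G"]]                      -> pop ["G"];     "C" is G's only neighbour
#   queue: [["G","C"]]                  -> pop ["G","C"]; enqueue paths via "A", "F", "G"
#   queue: [["G","C","A"], ["G","C","F"], ["G","C","G"]]  -> expand ["G","C","A"], ...
#   the goal "D" is first reached while extending ["G","C","A","B"], returning
#   ["G", "C", "A", "B", "D"]; that path has 4 edges, matching the distance of 4 above.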
| 72 | 1 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
_lowerCamelCase : Optional[Any] = quote(A_ )
return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
| 72 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : List[str] = LxmertForPreTraining(A_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A_, A_, A_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
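# Example invocation (the script name and all paths below are placeholders, not files
# shipped with the repository):
#   python convert_lxmert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/lxmert/model.ckpt \
#       --config_file /path/to/lxmert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin
# The three flags correspond one-to-one to the argparse arguments defined above.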
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : float, A_ : float, A_ : int ):
'''simple docstring'''
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(A_, A_ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
_lowerCamelCase : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
_lowerCamelCase : List[Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
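# Usage sketch (numbers are illustrative): borrowing 25_000 at 12% per annum
# (the rate is passed as the fraction 0.12) over 3 years gives a monthly rate of
# 0.01 across 36 payments, and the formula above comes out to roughly 830 per month.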
| 72 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841
_lowerCamelCase : List[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_lowerCamelCase : Any = defaultdict(A_ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_lowerCamelCase : List[str] = mst(A_ )
_lowerCamelCase : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_lowerCamelCase : int = tuple(answer[:2] )
_lowerCamelCase : int = tuple(edge[::-1] )
assert edge in result or reverse in result
| 72 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any=1_3 , __lowerCAmelCase : Dict=6_4 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : int=3 , __lowerCAmelCase : str=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=3_2 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Tuple=3_7 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : Union[str, Any]=[1, 1_6, 4, 4] , __lowerCAmelCase : List[Any]=None , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Dict = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Any = num_channels
_lowerCamelCase : Union[str, Any] = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Any = scope
_lowerCamelCase : Tuple = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_lowerCamelCase : Optional[int] = (self.image_size // 3_2) ** 2
_lowerCamelCase : Dict = num_patches + 1
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ViTHybridModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : List[str] = ViTHybridForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = config_and_inputs
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
snake_case__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : List[str] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = ViTHybridModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = _config_zero_init(__lowerCAmelCase )
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(config=__lowerCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_lowerCamelCase : List[str] = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Union[str, Any] = ViTHybridModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowerCAmelCase )
_lowerCamelCase : Dict = self.default_image_processor
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Dict = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : str = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_lowerCamelCase : str = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
_lowerCamelCase : Any = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
_lowerCamelCase : Any = prepare_img()
_lowerCamelCase : Dict = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' )
_lowerCamelCase : List[Any] = model(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
_lowerCamelCase : List[Any] = logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
| 72 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __snake_case ( _lowercase):
snake_case__ : Any = VOCAB_FILES_NAMES
snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
snake_case__ : Any = BartTokenizer
def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , )
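# Keep the fast backend's pre-tokenizer in sync with the requested add_prefix_space setting.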
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = '''post_processor'''
_lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
if tokenizer_component_instance:
_lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase : int = tuple(state['''cls'''] )
_lowerCamelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = add_prefix_space
_lowerCamelCase : Optional[Any] = True
if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets:
_lowerCamelCase : Any = trim_offsets
_lowerCamelCase : str = True
if changes_to_apply:
_lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) )
_lowerCamelCase : str = component_class(**__lowerCAmelCase )
setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value
_lowerCamelCase : str = value
def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 72 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __snake_case :
snake_case__ : Optional[int] = BlenderbotConfig
snake_case__ : Optional[int] = {}
snake_case__ : str = "gelu"
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=1_3 , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[Any]=9_9 , __lowerCAmelCase : Any=3_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : int=2_0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Dict=0 , ):
"""simple docstring"""
_lowerCamelCase : Any = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : int = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : Any = eos_token_id
_lowerCamelCase : Any = pad_token_id
_lowerCamelCase : List[Any] = bos_token_id
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCamelCase : List[str] = prepare_blenderbot_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = TFBlenderbotModel(config=__lowerCAmelCase ).get_decoder()
_lowerCamelCase : Union[str, Any] = inputs_dict['''input_ids''']
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Union[str, Any] = inputs_dict['''attention_mask'''][:1, :]
_lowerCamelCase : str = inputs_dict['''head_mask''']
_lowerCamelCase : str = 1
# first forward pass
_lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , head_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowerCamelCase : int = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCamelCase : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
_lowerCamelCase : List[str] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCamelCase : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , rtol=1E-3 )
def snake_case_ ( A_ : Dict, A_ : Optional[Any], A_ : List[Any], A_ : Optional[int]=None, A_ : List[str]=None, A_ : Dict=None, A_ : Any=None, A_ : Union[str, Any]=None, ):
'''simple docstring'''
if attention_mask is None:
_lowerCamelCase : Any = tf.cast(tf.math.not_equal(A_, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_lowerCamelCase : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_lowerCamelCase : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCamelCase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Optional[int] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
snake_case__ : Optional[int] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
snake_case__ : Dict = (
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
snake_case__ : Union[str, Any] = True
snake_case__ : str = False
snake_case__ : str = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : str = TFBlenderbotModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self , config_class=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCAmelCase )
@require_tokenizers
@require_tf
class __snake_case ( unittest.TestCase):
snake_case__ : str = ["My friends are cool but they eat too many carbs."]
snake_case__ : int = "facebook/blenderbot-400M-distill"
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.tokenizer(self.src_text , return_tensors='''tf''' )
_lowerCamelCase : int = self.model.generate(
model_inputs.input_ids , )
_lowerCamelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCAmelCase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 72 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : str ):
'''simple docstring'''
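# Map each lowercase letter to its 1-based position in the alphabet (ord('a') == 97, hence the offset of 96).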
return [ord(elem ) - 96 for elem in plain]
def snake_case_ ( A_ : list[int] ):
'''simple docstring'''
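# Inverse of encode: shift each number back into the lowercase ASCII range and join the characters.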
return "".join(chr(elem + 96 ) for elem in encoded )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''', A_ )
print('''Decoded:''', decode(A_ ) )
if __name__ == "__main__":
main()
| 72 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : int, A_ : int, A_ : int ):
'''simple docstring'''
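# Modular exponentiation by repeated squaring: computes (base ** exponent) % modulo_value without huge intermediates.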
if exponent == 1:
return base
if exponent % 2 == 0:
_lowerCamelCase : Optional[int] = _modexpt(A_, exponent // 2, A_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A_, exponent - 1, A_ )) % modulo_value
def snake_case_ ( A_ : int = 17_77, A_ : int = 18_55, A_ : int = 8 ):
'''simple docstring'''
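# Evaluate the power tower base^(base^(...)) of the given height, keeping only the last `digits` digits at every step.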
_lowerCamelCase : int = base
for _ in range(1, A_ ):
_lowerCamelCase : Optional[Any] = _modexpt(A_, A_, 10**digits )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(A_ ):
if len(A_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(A_ ) )
return data_lists
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
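# Min-max normalise every criterion: weight 1 keeps "higher is better", weight 0 inverts the score so "lower is better"; any other weight raises ValueError.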
_lowerCamelCase : list[list[float]] = []
for dlist, weight in zip(A_, A_ ):
_lowerCamelCase : Any = min(A_ )
_lowerCamelCase : Optional[Any] = max(A_ )
_lowerCamelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCamelCase : str = F'''Invalid weight of {weight:f} provided'''
raise ValueError(A_ )
score_lists.append(A_ )
return score_lists
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
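# Sum the normalised scores across all criteria for each candidate.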
_lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(A_ ):
_lowerCamelCase : List[str] = final_scores[j] + ele
return final_scores
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = get_data(A_ )
_lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ )
_lowerCamelCase : str = generate_final_scores(A_ )
# append scores to source data
for i, ele in enumerate(A_ ):
source_data[i].append(A_ )
return source_data
| 72 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
@dataclass
class __snake_case :
snake_case__ : nn.Module
snake_case__ : List[nn.Module] = field(default_factory=_lowercase)
snake_case__ : list = field(default_factory=_lowercase)
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tensor , __lowerCAmelCase : Tensor ):
"""simple docstring"""
_lowerCamelCase : List[Any] = len(list(m.modules() ) ) == 1 or isinstance(__lowerCAmelCase , nn.Convad ) or isinstance(__lowerCAmelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__lowerCAmelCase )
def __call__( self : Optional[int] , __lowerCAmelCase : Tensor ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__lowerCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return list(filter(lambda __lowerCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __snake_case :
snake_case__ : nn.Module
snake_case__ : nn.Module
snake_case__ : int = 1
snake_case__ : List = field(default_factory=_lowercase)
snake_case__ : List = field(default_factory=_lowercase)
snake_case__ : bool = True
def __call__( self : Optional[int] , __lowerCAmelCase : Tensor ):
"""simple docstring"""
_lowerCamelCase : List[str] = Tracker(self.dest )(__lowerCAmelCase ).parametrized
_lowerCamelCase : List[Any] = Tracker(self.src )(__lowerCAmelCase ).parametrized
_lowerCamelCase : Optional[Any] = list(filter(lambda __lowerCAmelCase : type(__lowerCAmelCase ) not in self.src_skip , __lowerCAmelCase ) )
_lowerCamelCase : Any = list(filter(lambda __lowerCAmelCase : type(__lowerCAmelCase ) not in self.dest_skip , __lowerCAmelCase ) )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ) and self.raise_if_mismatch:
raise Exception(
f'''Numbers of operations are different. Source module has {len(__lowerCAmelCase )} operations while'''
f''' destination module has {len(__lowerCAmelCase )}.''' )
for dest_m, src_m in zip(__lowerCAmelCase , __lowerCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
class __snake_case ( nn.Module):
def __init__( self : List[Any] , __lowerCAmelCase : nn.Module ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block''' ), f'''Unexpected layer name {k}'''
_lowerCamelCase : List[Any] = len(__lowerCAmelCase ) + 1
feature_blocks.append((f'''res{block_index}''', v) )
_lowerCamelCase : Optional[int] = nn.ModuleDict(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Tensor ):
"""simple docstring"""
return get_trunk_forward_outputs(
__lowerCAmelCase , out_feat_keys=__lowerCAmelCase , feature_blocks=self._feature_blocks , )
class __snake_case ( _lowercase):
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Any = x.split('''-''' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : List[str] , __lowerCAmelCase : str ):
"""simple docstring"""
if x not in self:
_lowerCamelCase : Tuple = self.convert_name_to_timm(__lowerCAmelCase )
_lowerCamelCase : str = partial(lambda: (timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase ).eval(), None) )
else:
_lowerCamelCase : Tuple = super().__getitem__(__lowerCAmelCase )
return val
class __snake_case ( _lowercase):
def __getitem__( self : str , __lowerCAmelCase : str ):
"""simple docstring"""
if "seer" in x and "in1k" not in x:
_lowerCamelCase : str = RegNetModel
else:
_lowerCamelCase : Union[str, Any] = RegNetForImageClassification
return val
def snake_case_ ( A_ : Tuple, A_ : Optional[Any], A_ : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
_lowerCamelCase : List[str] = from_state_dict[from_key].clone()
print(F'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def snake_case_ ( A_ : str, A_ : Callable[[], nn.Module], A_ : Callable[[], nn.Module], A_ : RegNetConfig, A_ : Path, A_ : bool = True, ):
'''simple docstring'''
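# Instantiate both models, transfer the weights, verify the outputs match, and optionally push model and image processor to the hub.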
print(F'''Converting {name}...''' )
with torch.no_grad():
_lowerCamelCase , _lowerCamelCase : str = from_model_func()
_lowerCamelCase : Union[str, Any] = our_model_func(A_ ).eval()
_lowerCamelCase : Any = ModuleTransfer(src=A_, dest=A_, raise_if_mismatch=A_ )
_lowerCamelCase : Tuple = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(A_ )
if from_state_dict is not None:
_lowerCamelCase : Any = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
_lowerCamelCase : Optional[int] = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
_lowerCamelCase : int = manually_copy_vissl_head(A_, our_model.state_dict(), A_ )
our_model.load_state_dict(A_ )
_lowerCamelCase : List[Any] = our_model(A_, output_hidden_states=A_ )
_lowerCamelCase : List[Any] = (
our_outputs.logits if isinstance(A_, A_ ) else our_outputs.last_hidden_state
)
_lowerCamelCase : str = from_model(A_ )
_lowerCamelCase : int = from_output[-1] if type(A_ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
_lowerCamelCase : Union[str, Any] = our_outputs.hidden_states[-1]
assert torch.allclose(A_, A_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name, commit_message='''Add model''', use_temp_dir=A_, )
_lowerCamelCase : int = 2_24 if '''seer''' not in name else 3_84
# we can use the convnext one
_lowerCamelCase : Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''', size=A_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name, commit_message='''Add image processor''', use_temp_dir=A_, )
print(F'''Pushed {name}''' )
def snake_case_ ( A_ : Path, A_ : str = None, A_ : bool = True ):
'''simple docstring'''
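# Build the per-checkpoint RegNet configurations and ImageNet label mappings, then convert either a single named model or all of them.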
_lowerCamelCase : int = '''imagenet-1k-id2label.json'''
_lowerCamelCase : Any = 10_00
_lowerCamelCase : str = (1, num_labels)
_lowerCamelCase : Any = '''huggingface/label-files'''
_lowerCamelCase : Any = num_labels
_lowerCamelCase : Optional[Any] = json.load(open(cached_download(hf_hub_url(A_, A_, repo_type='''dataset''' ) ), '''r''' ) )
_lowerCamelCase : List[str] = {int(A_ ): v for k, v in idalabel.items()}
_lowerCamelCase : List[Any] = idalabel
_lowerCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Dict = partial(A_, num_labels=A_, idalabel=A_, labelaid=A_ )
_lowerCamelCase : Any = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 1_52, 3_68], groups_width=8, layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 1_60, 3_84], groups_width=16, layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 2_40, 5_28], groups_width=24, layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 1_28, 2_88, 6_72], groups_width=16, layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 1_68, 4_08, 9_12], groups_width=24, layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 1_92, 4_32, 10_08], groups_width=48, layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 2_40, 5_60, 13_60], groups_width=40, layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[1_68, 3_92, 7_84, 16_24], groups_width=56, layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 2_40, 7_20, 19_20], groups_width=1_20, layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[2_24, 4_48, 8_96, 22_40], groups_width=1_12, layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[2_56, 5_12, 8_96, 20_48], groups_width=1_28, layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[3_36, 6_72, 13_44, 25_20], groups_width=1_68, layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 1_52, 3_68], groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 1_04, 2_08, 4_40], groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 1_12, 2_56, 6_08], groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 1_28, 3_20, 7_68], groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 1_20, 3_36, 8_88], groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 2_16, 5_76, 15_12], groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[1_28, 1_92, 5_12, 10_88], groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[1_44, 2_88, 5_76, 12_96], groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[1_68, 4_48, 8_96, 20_16], groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[2_24, 4_48, 8_96, 22_40], groups_width=1_12 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[2_24, 4_48, 12_32, 30_24], groups_width=1_12 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[2_32, 6_96, 13_92, 37_12], groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[2_32, 6_96, 13_92, 37_12], groups_width=2_32 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[3_28, 9_84, 19_68, 49_20], groups_width=3_28 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[5_28, 10_56, 29_04, 73_92], groups_width=2_64 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[6_40, 16_96, 25_44, 50_88], groups_width=6_40 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80], groups_width=10_10 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[2_32, 6_96, 13_92, 37_12], groups_width=2_32 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[3_28, 9_84, 19_68, 49_20], groups_width=3_28 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[5_28, 10_56, 29_04, 73_92], groups_width=2_64 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[6_40, 16_96, 25_44, 50_88], groups_width=6_40 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80], groups_width=10_10 ),
}
_lowerCamelCase : Union[str, Any] = NameToOurModelFuncMap()
_lowerCamelCase : List[str] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(A_ : str, A_ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
_lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(A_, model_dir=str(A_ ), map_location='''cpu''' )
_lowerCamelCase : List[str] = model_func()
# check if we have a head, if yes add it
_lowerCamelCase : Any = files['''classy_state_dict''']['''base_model''']['''model''']
_lowerCamelCase : Tuple = model_state_dict['''trunk''']
model.load_state_dict(A_ )
return model.eval(), model_state_dict["heads"]
# pretrained
_lowerCamelCase : Optional[int] = partial(
A_, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
_lowerCamelCase : List[Any] = partial(
A_, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
_lowerCamelCase : Dict = partial(
A_, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
_lowerCamelCase : str = partial(
A_, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''', lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27, group_width=10_10, w_0=17_44, w_a=620.83, w_m=2.52 ) ) ), )
# IN1K finetuned
_lowerCamelCase : Optional[Any] = partial(
A_, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
_lowerCamelCase : Tuple = partial(
A_, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), )
_lowerCamelCase : Tuple = partial(
A_, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), )
_lowerCamelCase : Optional[Any] = partial(
A_, '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''', lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27, group_width=10_10, w_0=17_44, w_a=620.83, w_m=2.52 ) ) ), )
if model_name:
convert_weight_and_push(
A_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], A_, A_, )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
A_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], A_, A_, A_, )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 72 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "unispeech"
def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = feat_extract_norm
_lowerCamelCase : List[Any] = feat_extract_activation
_lowerCamelCase : Any = list(__lowerCAmelCase )
_lowerCamelCase : Tuple = list(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : List[str] = conv_bias
_lowerCamelCase : List[str] = num_conv_pos_embeddings
_lowerCamelCase : Tuple = num_conv_pos_embedding_groups
_lowerCamelCase : List[str] = len(self.conv_dim )
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : Optional[Any] = feat_proj_dropout
_lowerCamelCase : Optional[int] = final_dropout
_lowerCamelCase : Any = layerdrop
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : List[str] = num_ctc_classes
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = do_stable_layer_norm
_lowerCamelCase : Tuple = use_weighted_layer_sum
_lowerCamelCase : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Any = apply_spec_augment
_lowerCamelCase : Dict = mask_time_prob
_lowerCamelCase : List[str] = mask_time_length
_lowerCamelCase : Optional[Any] = mask_time_min_masks
_lowerCamelCase : List[str] = mask_feature_prob
_lowerCamelCase : int = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCamelCase : Optional[Any] = num_codevectors_per_group
_lowerCamelCase : int = num_codevector_groups
_lowerCamelCase : List[Any] = contrastive_logits_temperature
_lowerCamelCase : List[str] = feat_quantizer_dropout
_lowerCamelCase : Dict = num_negatives
_lowerCamelCase : Optional[int] = codevector_dim
_lowerCamelCase : List[Any] = proj_codevector_dim
_lowerCamelCase : List[Any] = diversity_loss_weight
# ctc loss
_lowerCamelCase : Union[str, Any] = ctc_loss_reduction
_lowerCamelCase : Any = ctc_zero_infinity
# pretraining loss
_lowerCamelCase : str = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
_enforce_args(A_, A_ )
if n == 0:
return 0
_lowerCamelCase : Union[str, Any] = float('''-inf''' )
for i in range(1, n + 1 ):
_lowerCamelCase : List[Any] = max(
A_, prices[i - 1] + naive_cut_rod_recursive(n - i, A_ ) )
return max_revue
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
_enforce_args(A_, A_ )
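# Top-down dynamic programming: the table below caches the best revenue for every sub-length (initialised to -inf).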
_lowerCamelCase : Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(A_, A_, A_ )
def snake_case_ ( A_ : int, A_ : list, A_ : list ):
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_lowerCamelCase : Tuple = float('''-inf''' )
for i in range(1, n + 1 ):
_lowerCamelCase : Any = max(
A_, prices[i - 1] + _top_down_cut_rod_recursive(n - i, A_, A_ ), )
_lowerCamelCase : Dict = max_revenue
return max_rev[n]
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
_enforce_args(A_, A_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
_lowerCamelCase : Optional[Any] = [float('''-inf''' ) for _ in range(n + 1 )]
_lowerCamelCase : Optional[Any] = 0
for i in range(1, n + 1 ):
_lowerCamelCase : Tuple = max_rev[i]
for j in range(1, i + 1 ):
_lowerCamelCase : str = max(A_, prices[j - 1] + max_rev[i - j] )
_lowerCamelCase : Tuple = max_revenue_i
return max_rev[n]
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
if n < 0:
_lowerCamelCase : Union[str, Any] = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(A_ )
if n > len(A_ ):
_lowerCamelCase : List[Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F'''Got n = {n} but length of prices = {len(A_ )}'''
)
raise ValueError(A_ )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = [6, 10, 12, 15, 20, 23]
_lowerCamelCase : List[str] = len(A_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_lowerCamelCase : Union[str, Any] = 36
_lowerCamelCase : List[Any] = top_down_cut_rod(A_, A_ )
_lowerCamelCase : Any = bottom_up_cut_rod(A_, A_ )
_lowerCamelCase : Any = naive_cut_rod_recursive(A_, A_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 72 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
_lowerCamelCase : Optional[Any] = quote(A_ )
return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
| 72 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __snake_case :
def __init__( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any=3 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Any=5 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Union[str, Any]=3_7 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : List[Any]=1_6 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : int=None , ):
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_input_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : Any = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Tuple = type_sequence_label_size
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Any = num_labels
_lowerCamelCase : List[Any] = num_choices
_lowerCamelCase : Union[str, Any] = scope
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : int = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = FalconModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Any = FalconModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
_lowerCamelCase : Optional[int] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )
_lowerCamelCase : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = FalconForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = FalconForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
_lowerCamelCase : Optional[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase , )
_lowerCamelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCamelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowerCamelCase : int = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCamelCase : str = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0]
_lowerCamelCase : str = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0]
# select random slice
_lowerCamelCase : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = config_and_inputs
_lowerCamelCase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Tuple = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ : Tuple = (FalconForCausalLM,) if is_torch_available() else ()
snake_case__ : Optional[Any] = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Optional[Any] = False
snake_case__ : int = False
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = FalconModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase , *_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_lowerCamelCase : Dict = alibi
self.model_tester.create_and_check_model(__lowerCAmelCase , *__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Dict = input_dict['''input_ids''']
_lowerCamelCase : Dict = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase : List[Any] = FalconForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = 3
_lowerCamelCase : Any = '''single_label_classification'''
_lowerCamelCase : Tuple = input_dict['''input_ids''']
_lowerCamelCase : Tuple = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase : Optional[int] = FalconForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] = input_dict['''input_ids''']
_lowerCamelCase : Dict = FalconForCausalLM(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase , use_cache=__lowerCAmelCase )
_lowerCamelCase : List[str] = input_ids.shape[0]
_lowerCamelCase : Optional[int] = model._convert_to_rw_cache(result.past_key_values )
_lowerCamelCase : List[Any] = model._convert_cache_to_standard_format(__lowerCAmelCase , __lowerCAmelCase )
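        # the legacy RW cache stores each layer's keys/values as 3D tensors, the standard format as 4D;
        # converting to the RW format and back should reproduce the original cache exactly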
for layer in range(len(__lowerCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : str = 3
_lowerCamelCase : List[str] = '''multi_label_classification'''
_lowerCamelCase : Tuple = input_dict['''input_ids''']
_lowerCamelCase : str = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCamelCase : str = FalconForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__lowerCAmelCase , '''use_cache''' ):
return
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
if "use_cache" not in inputs:
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_lowerCamelCase : Dict = (
getattr(__lowerCAmelCase , '''decoder_layers''' , __lowerCAmelCase )
or getattr(__lowerCAmelCase , '''num_decoder_layers''' , __lowerCAmelCase )
or config.num_hidden_layers
)
_lowerCamelCase : Any = getattr(__lowerCAmelCase , '''num_kv_heads''' , config.num_attention_heads )
_lowerCamelCase : int = getattr(__lowerCAmelCase , '''d_model''' , config.hidden_size )
_lowerCamelCase : Any = embed_dim // num_attention_heads
_lowerCamelCase : Tuple = outputs['''past_key_values''']
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Any = inputs['''input_ids'''].shape
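            # each layer's cache is a (key, value) pair shaped (batch_size, num_heads, seq_length, head_dim);
            # the expected head count depends on the decoder architecture checked below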
for i in range(__lowerCAmelCase ):
if config.new_decoder_architecture:
_lowerCamelCase : Optional[int] = config.num_attention_heads
elif config.multi_query:
_lowerCamelCase : List[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
_lowerCamelCase : List[Any] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
_lowerCamelCase : Tuple = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=1_9 )
_lowerCamelCase : Optional[Any] = tokenizer.batch_decode(__lowerCAmelCase )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : int = FalconForCausalLM.from_pretrained(__lowerCAmelCase )
model.eval()
model.to(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=4 )
model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=4 )
model.generate(**__lowerCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = FalconForCausalLM.from_pretrained(__lowerCAmelCase )
model.eval()
model.to(device=__lowerCAmelCase )
_lowerCamelCase : str = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCAmelCase )
# Test results are the same with and without cache
_lowerCamelCase : Dict = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=2_0 , use_cache=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=2_0 , use_cache=__lowerCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 72 |
"""simple docstring"""
import unittest
import numpy as np
def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
if shape_a[0] != shape_b[0]:
_lowerCamelCase : Tuple = (
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(A_ )
if shape_b[1] != shape_c[1]:
_lowerCamelCase : Tuple = (
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(A_ )
_lowerCamelCase : List[str] = pseudo_inv
if a_inv is None:
try:
_lowerCamelCase : Any = np.linalg.inv(A_ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] )
_lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] )
_lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase )
self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : int = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 72 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = min_size
_lowerCamelCase : Any = max_size
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
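        # random binary masks and class labels act as stand-in segmentation annotations for the tests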
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = output.encoder_hidden_states
_lowerCamelCase : Tuple = output.pixel_decoder_hidden_states
_lowerCamelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case__ : Any = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MaskFormerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCamelCase : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(),
}
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on these hidden states (line 2152); the original implementation doesn't
_lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1E-4
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : int = model(**__lowerCAmelCase )
_lowerCamelCase : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : List[str] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : str = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[str] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase )
_lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']]
_lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[int], A_ : str ):
'''simple docstring'''
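    # greedy strategy: repeatedly take the largest denomination that still fits into the remaining value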
_lowerCamelCase : Tuple = int(A_ )
# Initialize Result
_lowerCamelCase : Dict = []
# Traverse through all denomination
for denomination in reversed(A_ ):
# Find denominations
while int(A_ ) >= int(A_ ):
total_value -= int(A_ )
answer.append(A_ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
lowerCAmelCase__ = []
lowerCAmelCase__ = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
lowerCAmelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 72 | 1 |
"""simple docstring"""
from math import sqrt
def snake_case_ ( A_ : int ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number >= 0
), "'number' must been an int and positive"
_lowerCamelCase : Optional[int] = True
# 0 and 1 are none primes.
if number <= 1:
_lowerCamelCase : Tuple = False
for divisor in range(2, int(round(sqrt(A_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
_lowerCamelCase : str = False
break
# precondition
assert isinstance(A_, A_ ), "'status' must been from type bool"
return status
def snake_case_ ( A_ : Any ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_lowerCamelCase : Dict = list(range(2, n + 1 ) )
    _lowerCamelCase : Dict = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(A_ ) ):
for j in range(i + 1, len(A_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_lowerCamelCase : Optional[Any] = 0
# filters actual prime numbers.
_lowerCamelCase : str = [x for x in begin_list if x != 0]
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type list"
return ans
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n > 2), "'N' must been an int and > 2"
_lowerCamelCase : Union[str, Any] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2, n + 1 ):
if is_prime(A_ ):
ans.append(A_ )
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type list"
return ans
def snake_case_ ( A_ : str ):
'''simple docstring'''
assert isinstance(A_, A_ ) and number >= 0, "'number' must been an int and >= 0"
_lowerCamelCase : Tuple = [] # this list will be returns of the function.
# potential prime number factors.
_lowerCamelCase : int = 2
_lowerCamelCase : Optional[int] = number
if number == 0 or number == 1:
ans.append(A_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(A_ ):
while quotient != 1:
if is_prime(A_ ) and (quotient % factor == 0):
ans.append(A_ )
quotient /= factor
else:
factor += 1
else:
ans.append(A_ )
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type list"
return ans
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
_lowerCamelCase : Union[str, Any] = 0
# prime factorization of 'number'
_lowerCamelCase : List[Any] = prime_factorization(A_ )
_lowerCamelCase : Any = max(A_ )
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type int"
return ans
def snake_case_ ( A_ : Tuple ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
_lowerCamelCase : Dict = 0
# prime factorization of 'number'
_lowerCamelCase : Dict = prime_factorization(A_ )
_lowerCamelCase : Optional[Any] = min(A_ )
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type int"
return ans
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ), "'number' must been an int"
assert isinstance(number % 2 == 0, A_ ), "compare bust been from type bool"
return number % 2 == 0
def snake_case_ ( A_ : str ):
'''simple docstring'''
assert isinstance(A_, A_ ), "'number' must been an int"
assert isinstance(number % 2 != 0, A_ ), "compare bust been from type bool"
return number % 2 != 0
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
assert (
isinstance(A_, A_ ) and (number > 2) and is_even(A_ )
), "'number' must been an int, even and > 2"
    _lowerCamelCase : List[str] = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
_lowerCamelCase : List[Any] = get_prime_numbers(A_ )
_lowerCamelCase : str = len(A_ )
# run variable for while-loops.
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Dict = None
# exit variable. for break up the loops
_lowerCamelCase : Tuple = True
while i < len_pn and loop:
_lowerCamelCase : Dict = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_lowerCamelCase : Tuple = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(A_, A_ )
and (len(A_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def snake_case_ ( A_ : Optional[int], A_ : Any ):
'''simple docstring'''
assert (
isinstance(A_, A_ )
and isinstance(A_, A_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_lowerCamelCase : str = 0
while numbera != 0:
_lowerCamelCase : Tuple = numbera % numbera
_lowerCamelCase : int = numbera
_lowerCamelCase : Tuple = rest
# precondition
assert isinstance(A_, A_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def snake_case_ ( A_ : Union[str, Any], A_ : str ):
'''simple docstring'''
assert (
isinstance(A_, A_ )
and isinstance(A_, A_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
    _lowerCamelCase : str = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_lowerCamelCase : Any = prime_factorization(A_ )
_lowerCamelCase : int = prime_factorization(A_ )
elif numbera == 1 or numbera == 1:
_lowerCamelCase : Dict = []
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = max(A_, A_ )
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : List[str] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_lowerCamelCase : Optional[int] = prime_fac_a.count(A_ )
_lowerCamelCase : List[str] = prime_fac_a.count(A_ )
for _ in range(max(A_, A_ ) ):
ans *= n
else:
_lowerCamelCase : int = prime_fac_a.count(A_ )
for _ in range(A_ ):
ans *= n
done.append(A_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_lowerCamelCase : Optional[int] = prime_fac_a.count(A_ )
for _ in range(A_ ):
ans *= n
done.append(A_ )
# precondition
assert isinstance(A_, A_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def snake_case_ ( A_ : Optional[int] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n >= 0), "'number' must been a positive int"
_lowerCamelCase : Any = 0
_lowerCamelCase : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(A_ ):
ans += 1
# precondition
assert isinstance(A_, A_ ) and is_prime(
A_ ), "'ans' must been a prime number and from type int"
return ans
def snake_case_ ( A_ : Optional[int], A_ : Optional[Any] ):
'''simple docstring'''
assert (
is_prime(A_ ) and is_prime(A_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_lowerCamelCase : Dict = p_number_a + 1 # jump to the next number
    _lowerCamelCase : Dict = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(A_ ):
number += 1
while number < p_number_a:
ans.append(A_ )
number += 1
# fetch the next prime number.
while not is_prime(A_ ):
number += 1
# precondition
assert (
isinstance(A_, A_ )
and ans[0] != p_number_a
and ans[len(A_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n >= 1), "'n' must been int and >= 1"
_lowerCamelCase : Optional[Any] = [] # will be returned.
for divisor in range(1, n + 1 ):
if n % divisor == 0:
ans.append(A_ )
# precondition
assert ans[0] == 1 and ans[len(A_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number > 1
), "'number' must been an int and >= 1"
_lowerCamelCase : List[Any] = get_divisors(A_ )
# precondition
assert (
isinstance(A_, A_ )
and (divisors[0] == 1)
and (divisors[len(A_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
    # sums all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def snake_case_ ( A_ : int, A_ : str ):
'''simple docstring'''
assert (
isinstance(A_, A_ )
and isinstance(A_, A_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_lowerCamelCase : List[Any] = gcd(abs(A_ ), abs(A_ ) )
# precondition
assert (
isinstance(A_, A_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n >= 0), "'n' must been a int and >= 0"
    _lowerCamelCase : Tuple = 1 # this will be returned.
for factor in range(1, n + 1 ):
ans *= factor
return ans
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n >= 0), "'n' must been an int and >= 0"
_lowerCamelCase : Any = 0
_lowerCamelCase : Union[str, Any] = 1
    _lowerCamelCase : int = 1 # this will be returned
for _ in range(n - 1 ):
_lowerCamelCase : int = ans
ans += fiba
_lowerCamelCase : Optional[Any] = tmp
return ans
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : int = 2_00_00_00 ):
'''simple docstring'''
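    # sieve of Eratosthenes: entries left at 0 mark primes, entries set to 1 mark non-primes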
_lowerCamelCase : int = [0 for i in range(n + 1 )]
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 1
for i in range(2, int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i, n + 1, A_ ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = 0
for i in range(A_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 | 1 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''', ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''', ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''', [None, '''v2'''] )
def snake_case_ ( A_ : Any, A_ : str, A_ : int ):
'''simple docstring'''
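    # the hub URL must percent-encode the file path and fall back to the 'main' revision when none is given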
_lowerCamelCase : str = hf_hub_url(repo_id=A_, path=A_, revision=A_ )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(A_ )}'''
| 72 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def snake_case_ ( A_ : Any ):
'''simple docstring'''
_lowerCamelCase : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
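    # create a bias-free linear output projection that shares (ties) its weights with the token embedding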
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ )
_lowerCamelCase : str = emb.weight.data
return lin_layer
def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model''']
remove_ignore_keys_(A_ )
_lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ )
if mbart_aa and finetuned:
_lowerCamelCase : Any = '''relu'''
_lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight''']
_lowerCamelCase : Any = MBartForConditionalGeneration(A_ )
model.model.load_state_dict(A_ )
if finetuned:
_lowerCamelCase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 72 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
snake_case__ : List[Any] = ["note_seq"]
def __init__( self : str , *__lowerCAmelCase : int , **__lowerCAmelCase : str ):
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , *__lowerCAmelCase : str , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
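    # one step of Gaussian (forward) elimination: normalise each row's leading coefficient to 1,
    # subtract rows from the first row to cancel the leading column, then recurse on the reduced sub-system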
_lowerCamelCase : Optional[int] = current_set.copy()
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
if len(A_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_lowerCamelCase : Dict = len(A_ ) + 1
if any(len(A_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(A_, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(A_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowerCamelCase : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
_lowerCamelCase : str = data_set.copy()
_lowerCamelCase : List[Any] = []
for row_index, row in enumerate(A_ ):
if 0 not in row:
_lowerCamelCase : Union[str, Any] = data_set.pop(A_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, A_ )
_lowerCamelCase : List[str] = data_set.copy()
_lowerCamelCase : int = simplify(A_ )
_lowerCamelCase : int = simplified[::-1]
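    # back substitution: walk the triangularised rows from shortest to longest, reusing already-found solutions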
_lowerCamelCase : list = []
for row in simplified:
_lowerCamelCase : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A_ ) == 0:
solutions.append(0 )
continue
_lowerCamelCase : Tuple = temp_row[1::]
_lowerCamelCase : Tuple = temp_row[::-1]
for column_index, column in enumerate(A_ ):
current_solution -= column * solutions[column_index]
solutions.append(A_ )
_lowerCamelCase : Optional[int] = []
for item in solutions:
final.append(float(round(A_, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 72 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
lowerCAmelCase__ = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
lowerCAmelCase__ = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
lowerCAmelCase__ = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __snake_case ( datasets.Metric):
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
if return_pvalue:
_lowerCamelCase : List[Any] = pearsonr(__lowerCAmelCase , __lowerCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__lowerCAmelCase , __lowerCAmelCase )[0] )}
| 72 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "Speech2TextFeatureExtractor"
snake_case__ : Union[str, Any] = "Speech2TextTokenizer"
def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : str = False
def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_lowerCamelCase : str = kwargs.pop('''raw_speech''' )
else:
_lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : List[Any] = args[0]
_lowerCamelCase : int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
_lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase : List[str] = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@contextmanager
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call).''' )
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Any = self.tokenizer
yield
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : Tuple = False
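# Hedged usage sketch (comments only, not executable as-is): the deprecation warning above points
# to passing the transcription through the `text` argument of `__call__` instead of using
# `as_target_processor`. The names `processor`, `raw_speech` and the transcript string below are
# hypothetical placeholders, not values taken from this file.
#
#     inputs = processor(audio=raw_speech, sampling_rate=16_000, text="a transcript")
#     # the tokenized text is attached to the returned features via the assignment from
#     # encodings['input_ids'] in `__call__` above; equivalently, in two separate calls:
#     inputs = processor(audio=raw_speech, sampling_rate=16_000)
#     labels = processor(text="a transcript")["input_ids"]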
| 72 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case ( _lowercase):
def __init__( self : Optional[int] , __lowerCAmelCase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : Dict = nn.ModuleList(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Union[torch.Tensor, float, int] , __lowerCAmelCase : torch.Tensor , __lowerCAmelCase : List[torch.tensor] , __lowerCAmelCase : List[float] , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[Dict[str, Any]] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase , __lowerCAmelCase , self.nets ) ):
_lowerCamelCase , _lowerCamelCase : Tuple = controlnet(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
# merge samples
if i == 0:
_lowerCamelCase , _lowerCamelCase : List[str] = down_samples, mid_sample
else:
_lowerCamelCase : List[Any] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__lowerCAmelCase , __lowerCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Union[str, os.PathLike] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Callable = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[str] = None , ):
"""simple docstring"""
_lowerCamelCase : Any = 0
_lowerCamelCase : List[Any] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__lowerCAmelCase , is_main_process=__lowerCAmelCase , save_function=__lowerCAmelCase , safe_serialization=__lowerCAmelCase , variant=__lowerCAmelCase , )
idx += 1
_lowerCamelCase : Optional[int] = model_path_to_save + f'''_{idx}'''
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , __lowerCAmelCase : Optional[Union[str, os.PathLike]] , **__lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
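        # Hypothetical on-disk layout for three controlnets saved with
        # `save_pretrained("./mydirectory/controlnet")` (illustrative paths only, mirroring the
        # naming convention described in the two comments above):
        #   ./mydirectory/controlnet      <- self.nets[0]
        #   ./mydirectory/controlnet_1    <- self.nets[1]
        #   ./mydirectory/controlnet_2    <- self.nets[2]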
_lowerCamelCase : int = pretrained_model_path
while os.path.isdir(__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = ControlNetModel.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
controlnets.append(__lowerCAmelCase )
idx += 1
_lowerCamelCase : Union[str, Any] = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.''' )
if len(__lowerCAmelCase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.''' )
return cls(__lowerCAmelCase )
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
from maths.prime_check import is_prime
def snake_case_ ( A_ : int ):
'''simple docstring'''
if not isinstance(A_, A_ ):
_lowerCamelCase : Optional[int] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(A_ )
if is_prime(A_ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
lowerCAmelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def snake_case_ ( A_ : str ):
'''simple docstring'''
_lowerCamelCase : Tuple = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
_lowerCamelCase : Stack[int] = Stack()
_lowerCamelCase : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(A_ ) )
elif i in operators:
# RULE 2
operator_stack.push(A_ )
elif i == ")":
# RULE 4
_lowerCamelCase : int = operator_stack.peek()
operator_stack.pop()
_lowerCamelCase : Dict = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase : Any = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase : Optional[int] = operators[opr](A_, A_ )
operand_stack.push(A_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCAmelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 72 |
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case_ ( A_ : float = 0.1 ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 3
_lowerCamelCase : List[str] = 3
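    # The loop below visits the three new corner values added by the next layer: for the current j
    # they are j*j + (j+1), j*j + 2*(j+1) and j*j + 3*(j+1); the fourth corner (j+2)**2 is a
    # perfect square and never prime, so the exclusive upper bound of `range` skips it. 2*j - 1 is
    # the number of diagonal values seen so far. (This matches Project Euler problem 58, "spiral
    # primes" -- an inference from the arithmetic, not something stated in the original snippet.)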
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ):
primes += is_prime(A_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case ( _lowercase):
snake_case__ : Optional[int] = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE ( self : int , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__lowerCAmelCase )
return config
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config()
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Dict = len(__lowerCAmelCase )
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : Union[str, Any] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Any = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
_lowerCamelCase : str = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_lowerCamelCase : str = scheduler.batch_step_no_noise(__lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Optional[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_53.18_33 ) < 1E-2
assert abs(result_mean.item() - 0.50_05 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
_lowerCamelCase : int = self.dummy_model()
_lowerCamelCase : Tuple = self.dummy_sample_deter
_lowerCamelCase : int = torch.manual_seed(0 )
for t in reversed(range(__lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Dict = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
_lowerCamelCase : Union[str, Any] = pred_prev_sample
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Optional[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
assert abs(result_mean.item() - 0.33_72 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_lowerCamelCase : int = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = len(__lowerCAmelCase )
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter
_lowerCamelCase : List[str] = torch.manual_seed(0 )
for t in reversed(range(__lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCamelCase : List[str] = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Optional[int] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
assert abs(result_mean.item() - 0.26_31 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config()
_lowerCamelCase : Optional[int] = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
_lowerCamelCase : Tuple = scheduler.timesteps
for i, timestep in enumerate(__lowerCAmelCase ):
if i == len(__lowerCAmelCase ) - 1:
_lowerCamelCase : str = -1
else:
_lowerCamelCase : List[Any] = timesteps[i + 1]
_lowerCamelCase : Optional[Any] = scheduler.previous_timestep(__lowerCAmelCase )
_lowerCamelCase : Tuple = prev_t.item()
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Any = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(__lowerCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Tuple = [1_0_0, 8_7, 5_0, 1, 0]
_lowerCamelCase : Dict = len(__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__lowerCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
| 72 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : int = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase )
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
_lowerCamelCase : int = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
_lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
_lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = -1
_lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n"
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 72 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Optional[Any] = IFInpaintingPipeline
snake_case__ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case__ : Tuple = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=0 ):
"""simple docstring"""
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : Tuple = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : Dict = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : List[str] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 72 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
snake_case__ : int = "retribert"
def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : int = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = share_encoders
_lowerCamelCase : Optional[Any] = projection_dim
| 72 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''MaskFormerFeatureExtractor''']
lowerCAmelCase__ = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
lowerCAmelCase__ = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 72 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = min_size
_lowerCamelCase : Any = max_size
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = output.encoder_hidden_states
_lowerCamelCase : Tuple = output.pixel_decoder_hidden_states
_lowerCamelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case__ : Any = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MaskFormerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCamelCase : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(),
}
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1E-4
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : int = model(**__lowerCAmelCase )
_lowerCamelCase : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : List[str] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : str = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[str] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase )
_lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']]
_lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 72 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
lowerCAmelCase__ = list[tuple[int, int]]
lowerCAmelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCAmelCase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
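# Node records a grid cell, the goal cell and a parent link so the search classes can rebuild the route afterwards.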
class __snake_case :
def __init__( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None ):
"""simple docstring"""
_lowerCamelCase : List[Any] = pos_x
_lowerCamelCase : List[Any] = pos_y
_lowerCamelCase : str = (pos_y, pos_x)
_lowerCamelCase : Tuple = goal_x
_lowerCamelCase : int = goal_y
_lowerCamelCase : int = parent
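# Unidirectional BFS: nodes are expanded in FIFO order from node_queue until the target position is reached.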
class __snake_case :
def __init__( self : Any , __lowerCAmelCase : tuple[int, int] , __lowerCAmelCase : tuple[int, int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , __lowerCAmelCase )
_lowerCamelCase : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , __lowerCAmelCase )
_lowerCamelCase : int = [self.start]
_lowerCamelCase : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
while self.node_queue:
_lowerCamelCase : Dict = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_lowerCamelCase : Tuple = True
return self.retrace_path(__lowerCAmelCase )
_lowerCamelCase : List[str] = self.get_successors(__lowerCAmelCase )
for node in successors:
self.node_queue.append(__lowerCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Node ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = []
for action in delta:
_lowerCamelCase : List[str] = parent.pos_x + action[1]
_lowerCamelCase : Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , __lowerCAmelCase ) )
return successors
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Node | None ):
"""simple docstring"""
_lowerCamelCase : Tuple = node
_lowerCamelCase : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCamelCase : Optional[int] = current_node.parent
path.reverse()
return path
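# Bidirectional BFS: a forward search from the start and a backward search from the goal advance in lockstep and stop when their current nodes meet.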
class __snake_case :
def __init__( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = BreadthFirstSearch(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Tuple = BreadthFirstSearch(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : int = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_lowerCamelCase : Optional[Any] = self.fwd_bfs.node_queue.pop(0 )
_lowerCamelCase : List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_lowerCamelCase : Optional[Any] = True
return self.retrace_bidirectional_path(
__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : str = current_bwd_node
_lowerCamelCase : Union[str, Any] = current_fwd_node
_lowerCamelCase : str = {
self.fwd_bfs: self.fwd_bfs.get_successors(__lowerCAmelCase ),
self.bwd_bfs: self.bwd_bfs.get_successors(__lowerCAmelCase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__lowerCAmelCase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.fwd_bfs.retrace_path(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.bwd_bfs.retrace_path(__lowerCAmelCase )
bwd_path.pop()
bwd_path.reverse()
_lowerCamelCase : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCAmelCase__ = (0, 0)
lowerCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = BreadthFirstSearch(init, goal)
lowerCAmelCase__ = bfs.search()
lowerCAmelCase__ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = BidirectionalBreadthFirstSearch(init, goal)
lowerCAmelCase__ = bd_bfs.search()
lowerCAmelCase__ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 72 |
"""simple docstring"""
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
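# bfs_shortest_path returns the node sequence of a shortest path from start to goal; bfs_shortest_path_distance returns only its length in edges.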
def snake_case_ ( A_ : dict, A_ : int, A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[str] = set()
# keep track of all the paths to be checked
_lowerCamelCase : str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_lowerCamelCase : str = queue.pop(0 )
# get the last node from the path
_lowerCamelCase : List[Any] = path[-1]
if node not in explored:
_lowerCamelCase : Union[str, Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_lowerCamelCase : Union[str, Any] = list(A_ )
new_path.append(A_ )
queue.append(A_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A_ )
# in case there's no path between the 2 nodes
return []
def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_lowerCamelCase : Optional[int] = [start]
_lowerCamelCase : int = set(A_ )
# Keep tab on distances from `start` node.
_lowerCamelCase : int = {start: 0, target: -1}
while queue:
_lowerCamelCase : Optional[Any] = queue.pop(0 )
if node == target:
_lowerCamelCase : Any = (
dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A_ )
queue.append(A_ )
_lowerCamelCase : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 72 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = tempfile.mkdtemp()
_lowerCamelCase : Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_lowerCamelCase : List[str] = {
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_lowerCamelCase : str = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__lowerCAmelCase : int ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , **__lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_lowerCamelCase : Dict = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[Any] = self.get_rust_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : List[Any] = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCamelCase : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
_lowerCamelCase : str = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCamelCase : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_lowerCamelCase : Tuple = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_lowerCamelCase : int = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[int] = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.prepare_image_inputs()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''np''' )
_lowerCamelCase : Tuple = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : List[str] = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : List[Any] = '''lower newer'''
_lowerCamelCase : Tuple = processor(text=__lowerCAmelCase )
_lowerCamelCase : str = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : str = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = '''lower newer'''
_lowerCamelCase : int = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Optional[int] = processor.batch_decode(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = '''lower newer'''
_lowerCamelCase : Tuple = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 72 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : List[str] = LxmertForPreTraining(A_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A_, A_, A_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : list ):
'''simple docstring'''
if len(A_ ) <= 1:
return [tuple(A_ )]
_lowerCamelCase : Tuple = []
def generate(A_ : int, A_ : list ):
_lowerCamelCase : int = [0] * n
res.append(tuple(A_ ) )
_lowerCamelCase : str = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_lowerCamelCase , _lowerCamelCase : List[Any] = arr[i], arr[0]
else:
_lowerCamelCase , _lowerCamelCase : Dict = arr[i], arr[c[i]]
res.append(tuple(A_ ) )
c[i] += 1
_lowerCamelCase : Dict = 0
else:
_lowerCamelCase : List[Any] = 0
i += 1
generate(len(A_ ), A_ )
return res
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 72 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
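# Regression check for Prim's algorithm on a small weighted graph: every expected MST edge must appear in the result, in either direction.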
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841
_lowerCamelCase : List[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_lowerCamelCase : Any = defaultdict(A_ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_lowerCamelCase : List[str] = mst(A_ )
_lowerCamelCase : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_lowerCamelCase : int = tuple(answer[:2] )
_lowerCamelCase : int = tuple(edge[::-1] )
assert edge in result or reverse in result
| 72 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( _lowercase):
def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : str=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Dict=False , __lowerCAmelCase : str=False , __lowerCAmelCase : int=False , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=9_9 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : int=3_2 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : List[str]=5_1_2 , __lowerCAmelCase : Union[str, Any]=1_2 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : int=3 , __lowerCAmelCase : Optional[Any]=4 , __lowerCAmelCase : Any="last" , __lowerCAmelCase : Any=None , __lowerCAmelCase : Optional[Any]=None , ):
"""simple docstring"""
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = is_training
_lowerCamelCase : Dict = use_input_lengths
_lowerCamelCase : List[Any] = use_token_type_ids
_lowerCamelCase : Tuple = use_labels
_lowerCamelCase : Any = gelu_activation
_lowerCamelCase : Union[str, Any] = sinusoidal_embeddings
_lowerCamelCase : int = causal
_lowerCamelCase : Optional[int] = asm
_lowerCamelCase : List[Any] = n_langs
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Dict = n_special
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : List[Any] = num_labels
_lowerCamelCase : Any = num_choices
_lowerCamelCase : Optional[Any] = summary_type
_lowerCamelCase : Optional[int] = use_proj
_lowerCamelCase : List[str] = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Dict = None
if self.use_input_lengths:
_lowerCamelCase : Dict = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_lowerCamelCase : str = None
_lowerCamelCase : List[Any] = None
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , 2 ).float()
_lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Optional[int] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , ):
"""simple docstring"""
_lowerCamelCase : Any = FlaubertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase )
_lowerCamelCase : Tuple = model(__lowerCAmelCase , langs=__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = FlaubertWithLMHeadModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : Dict = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : List[str] = FlaubertForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
_lowerCamelCase : Dict = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , )
_lowerCamelCase : str = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , )
((_lowerCamelCase) , ) : Dict = result_with_labels.to_tuple()
_lowerCamelCase : str = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
((_lowerCamelCase) , ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : Tuple = FlaubertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Any = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.num_labels
_lowerCamelCase : Any = FlaubertForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.num_choices
_lowerCamelCase : int = FlaubertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Optional[int] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : int = config_and_inputs
_lowerCamelCase : str = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : str = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ : Union[str, Any] = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : int = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_lowerCamelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = FlaubertModelTester(self )
_lowerCamelCase : int = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[int] = FlaubertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_lowerCamelCase : List[Any] = True
_lowerCamelCase : List[str] = model_class(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : str = torch.jit.trace(
__lowerCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) )
_lowerCamelCase : Union[str, Any] = torch.jit.load(os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) , map_location=__lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(__lowerCAmelCase ) , inputs_dict['''attention_mask'''].to(__lowerCAmelCase ) )
@require_torch
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_lowerCamelCase : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
_lowerCamelCase : str = model(__lowerCAmelCase )[0]
_lowerCamelCase : List[str] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __lowerCAmelCase )
_lowerCamelCase : Dict = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 72 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __snake_case ( _lowercase):
snake_case__ : Any = VOCAB_FILES_NAMES
snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
snake_case__ : Any = BartTokenizer
def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = '''post_processor'''
_lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
if tokenizer_component_instance:
_lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase : int = tuple(state['''cls'''] )
_lowerCamelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = add_prefix_space
_lowerCamelCase : Optional[Any] = True
if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets:
_lowerCamelCase : Any = trim_offsets
_lowerCamelCase : str = True
if changes_to_apply:
_lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) )
_lowerCamelCase : str = component_class(**__lowerCAmelCase )
setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value
_lowerCamelCase : str = value
def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : str ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(A_ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 72 |
"""simple docstring"""
from __future__ import annotations
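# a1z26 cipher: each lowercase letter is encoded as its 1-based position in the alphabet (a=1, ..., z=26) and decoded back with chr.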
def snake_case_ ( A_ : str ):
'''simple docstring'''
return [ord(A_ ) - 96 for elem in plain]
def snake_case_ ( A_ : list[int] ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''', A_ )
print('''Decoded:''', decode(A_ ) )
if __name__ == "__main__":
main()
| 72 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
pass
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : Dict = pipe(
image=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
_lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : List[Any] = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
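# Numerically stable softmax over the last axis: the running maximum is subtracted before exponentiating so large logits cannot overflow.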
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase : Any = np.max(A_, axis=-1, keepdims=A_ )
_lowerCamelCase : List[str] = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=A_ )
class __snake_case ( _lowercase):
def SCREAMING_SNAKE_CASE ( self : Any , **__lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Tuple = {}
if "second_text" in kwargs:
_lowerCamelCase : Dict = kwargs['''second_text''']
return preprocess_kwargs, {}, {}
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Any=None ):
"""simple docstring"""
return self.tokenizer(__lowerCAmelCase , text_pair=__lowerCAmelCase , return_tensors=self.framework )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Tuple ):
"""simple docstring"""
return self.model(**__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Any = model_outputs.logits[0].numpy()
_lowerCamelCase : List[Any] = softmax(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = np.argmax(__lowerCAmelCase )
_lowerCamelCase : Any = self.model.config.idalabel[best_class]
_lowerCamelCase : Dict = probabilities[best_class].item()
_lowerCamelCase : List[str] = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(A_ ):
if len(A_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(A_ ) )
return data_lists
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for dlist, weight in zip(A_, A_ ):
_lowerCamelCase : Any = min(A_ )
_lowerCamelCase : Optional[Any] = max(A_ )
_lowerCamelCase : list[float] = []
# for weight 0, the score is 1 - the min-max normalised value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCamelCase : str = F'''Invalid weight of {weight:f} provided'''
raise ValueError(A_ )
score_lists.append(A_ )
return score_lists
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(A_ ):
_lowerCamelCase : List[str] = final_scores[j] + ele
return final_scores
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = get_data(A_ )
_lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ )
_lowerCamelCase : str = generate_final_scores(A_ )
# append scores to source data
for i, ele in enumerate(A_ ):
source_data[i].append(A_ )
return source_data
| 72 | 1 |
"""simple docstring"""
lowerCAmelCase__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 72 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "unispeech"
def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = feat_extract_norm
_lowerCamelCase : List[Any] = feat_extract_activation
_lowerCamelCase : Any = list(__lowerCAmelCase )
_lowerCamelCase : Tuple = list(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : List[str] = conv_bias
_lowerCamelCase : List[str] = num_conv_pos_embeddings
_lowerCamelCase : Tuple = num_conv_pos_embedding_groups
_lowerCamelCase : List[str] = len(self.conv_dim )
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : Optional[Any] = feat_proj_dropout
_lowerCamelCase : Optional[int] = final_dropout
_lowerCamelCase : Any = layerdrop
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : List[str] = num_ctc_classes
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = do_stable_layer_norm
_lowerCamelCase : Tuple = use_weighted_layer_sum
_lowerCamelCase : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Any = apply_spec_augment
_lowerCamelCase : Dict = mask_time_prob
_lowerCamelCase : List[str] = mask_time_length
_lowerCamelCase : Optional[Any] = mask_time_min_masks
_lowerCamelCase : List[str] = mask_feature_prob
_lowerCamelCase : int = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCamelCase : Optional[Any] = num_codevectors_per_group
_lowerCamelCase : int = num_codevector_groups
_lowerCamelCase : List[Any] = contrastive_logits_temperature
_lowerCamelCase : List[str] = feat_quantizer_dropout
_lowerCamelCase : Dict = num_negatives
_lowerCamelCase : Optional[int] = codevector_dim
_lowerCamelCase : List[Any] = proj_codevector_dim
_lowerCamelCase : List[Any] = diversity_loss_weight
# ctc loss
_lowerCamelCase : Union[str, Any] = ctc_loss_reduction
_lowerCamelCase : Any = ctc_zero_infinity
# pretraining loss
_lowerCamelCase : str = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
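# Editor's sketch (not part of the original config): the property above multiplies the
# convolutional strides together. Assuming the default conv_stride of (5, 2, 2, 2, 2, 2, 2),
# one encoder frame corresponds to 320 input samples.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320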
| 72 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
def snake_case_ ( A_ : str, A_ : str, A_ : int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = Path(A_ )
_lowerCamelCase : Optional[int] = Path(A_ )
dest_dir.mkdir(exist_ok=A_ )
for path in src_dir.iterdir():
_lowerCamelCase : Union[str, Any] = [x.rstrip() for x in list(path.open().readlines() )][:n]
_lowerCamelCase : int = dest_dir.joinpath(path.name )
print(A_ )
dest_path.open('''w''' ).write('''\n'''.join(A_ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 72 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
_lowerCamelCase : Optional[Any] = quote(A_ )
return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
| 72 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
_lowerCamelCase : str = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
_lowerCamelCase : Optional[int] = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : int = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : Dict = model(__lowerCAmelCase )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , __lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCAmelCase , atol=1E-3 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
_lowerCamelCase : Tuple = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
_lowerCamelCase : Optional[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
_lowerCamelCase : int = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_lowerCamelCase : List[str] = model(__lowerCAmelCase )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , __lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCAmelCase , atol=1E-3 ) )
| 72 |
"""simple docstring"""
import unittest
import numpy as np
def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
if shape_a[0] != shape_b[0]:
_lowerCamelCase : Tuple = (
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(A_ )
if shape_b[1] != shape_c[1]:
_lowerCamelCase : Tuple = (
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(A_ )
_lowerCamelCase : List[str] = pseudo_inv
if a_inv is None:
try:
_lowerCamelCase : Any = np.linalg.inv(A_ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] )
_lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] )
_lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase )
self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : int = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
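# Editor's sketch with hypothetical values (not one of the original test cases): for an
# invertible A, det([[A, B], [B.T, C]]) == det(A) * det(S), where S = C - B.T @ inv(A) @ B
# is exactly the Schur complement computed by the function above.
import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[0.0, 2.0], [1.0, 0.0]])
c = np.array([[5.0, 0.0], [0.0, 5.0]])
s = c - b.T @ np.linalg.inv(a) @ b
m = np.block([[a, b], [b.T, c]])
assert abs(np.linalg.det(m) - np.linalg.det(a) * np.linalg.det(s)) < 1e-8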
| 72 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __snake_case ( _lowercase):
snake_case__ : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
snake_case__ : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
snake_case__ : Union[str, Any] = "document_qa"
snake_case__ : Optional[Any] = AutoProcessor
snake_case__ : Tuple = VisionEncoderDecoderModel
snake_case__ : List[Any] = ["image", "text"]
snake_case__ : Tuple = ["text"]
def __init__( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : "Image" , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[str] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase : Optional[Any] = task_prompt.replace('''{user_input}''' , __lowerCAmelCase )
_lowerCamelCase : str = self.pre_processor.tokenizer(
__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors='''pt''' ).input_ids
_lowerCamelCase : Any = self.pre_processor(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowerCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowerCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowerCAmelCase , ).sequences
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = self.pre_processor.batch_decode(__lowerCAmelCase )[0]
_lowerCamelCase : Optional[int] = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
_lowerCamelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
_lowerCamelCase : int = re.sub(R'''<.*?>''' , '''''' , __lowerCAmelCase , count=1 ).strip() # remove first task start token
_lowerCamelCase : Dict = self.pre_processor.tokenajson(__lowerCAmelCase )
return sequence["answer"]
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[int], A_ : str ):
'''simple docstring'''
_lowerCamelCase : Tuple = int(A_ )
# Initialize Result
_lowerCamelCase : Dict = []
# Traverse through all denomination
for denomination in reversed(A_ ):
# Find denominations
while int(A_ ) >= int(A_ ):
total_value -= int(A_ )
answer.append(A_ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
lowerCAmelCase__ = []
lowerCAmelCase__ = '''0'''
if (
input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
lowerCAmelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
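# Editor's sketch of the same greedy idea in self-contained form (the name
# greedy_change_sketch is hypothetical, not part of the original script): always take the
# largest denomination that still fits into the remaining value.
def greedy_change_sketch(denominations: list[int], value: int) -> list[int]:
    remaining = int(value)
    picked = []
    for coin in sorted(denominations, reverse=True):
        while remaining >= coin:
            remaining -= coin
            picked.append(coin)
    return picked

assert greedy_change_sketch([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]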
| 72 | 1 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowerCAmelCase__ = data_utils.TransfoXLTokenizer
lowerCAmelCase__ = data_utils.TransfoXLCorpus
lowerCAmelCase__ = data_utils
lowerCAmelCase__ = data_utils
def snake_case_ ( A_ : Optional[Any], A_ : List[Any], A_ : Any, A_ : Optional[Any] ):
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(A_, '''rb''' ) as fp:
_lowerCamelCase : Union[str, Any] = pickle.load(A_, encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
_lowerCamelCase : Union[str, Any] = corpus.vocab.__dict__
torch.save(A_, A_ )
_lowerCamelCase : List[str] = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''', A_ )
_lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(A_, A_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_lowerCamelCase : List[str] = os.path.abspath(A_ )
_lowerCamelCase : Optional[Any] = os.path.abspath(A_ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_lowerCamelCase : Dict = TransfoXLConfig()
else:
_lowerCamelCase : Dict = TransfoXLConfig.from_json_file(A_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : str = TransfoXLLMHeadModel(A_ )
_lowerCamelCase : Dict = load_tf_weights_in_transfo_xl(A_, A_, A_ )
# Save pytorch-model
_lowerCamelCase : Dict = os.path.join(A_, A_ )
_lowerCamelCase : Union[str, Any] = os.path.join(A_, A_ )
print(F'''Save PyTorch model to {os.path.abspath(A_ )}''' )
torch.save(model.state_dict(), A_ )
print(F'''Save configuration file to {os.path.abspath(A_ )}''' )
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : int = 2_00_00_00 ):
'''simple docstring'''
_lowerCamelCase : int = [0 for i in range(n + 1 )]
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 1
for i in range(2, int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i, n + 1, A_ ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = 0
for i in range(A_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
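# Editor's sketch of the sieve logic above in self-contained form (prime_sum_sketch is a
# hypothetical helper name): for example, the primes below 10 are 2, 3, 5, 7 and sum to 17.
def prime_sum_sketch(limit: int) -> int:
    sieve = [True] * limit
    sieve[0] = sieve[1] = False
    for i in range(2, int(limit**0.5) + 1):
        if sieve[i]:
            for j in range(i * i, limit, i):
                sieve[j] = False
    return sum(i for i, is_p in enumerate(sieve) if is_p)

assert prime_sum_sketch(10) == 17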
| 72 | 1 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCAmelCase__ = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCAmelCase__ = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCAmelCase__ = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCAmelCase__ = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCAmelCase__ = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase__ = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCAmelCase__ = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : str = randrange(len(A_ ) ), randrange(len(A_ ) )
_lowerCamelCase : Optional[Any] = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
_lowerCamelCase , _lowerCamelCase : Tuple = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def snake_case_ ( A_ : int = 1_00 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(A_ ))
@pytest.mark.parametrize('''hand, expected''', A_ )
def snake_case_ ( A_ : Union[str, Any], A_ : str ):
'''simple docstring'''
assert PokerHand(A_ )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''', A_ )
def snake_case_ ( A_ : Optional[Any], A_ : Tuple ):
'''simple docstring'''
assert PokerHand(A_ )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''', A_ )
def snake_case_ ( A_ : Tuple, A_ : str, A_ : str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = PokerHand(A_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''', A_ )
def snake_case_ ( A_ : Tuple, A_ : Any ):
'''simple docstring'''
assert PokerHand(A_ )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''', A_ )
def snake_case_ ( A_ : Any, A_ : Optional[int] ):
'''simple docstring'''
assert PokerHand(A_ )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''', A_ )
def snake_case_ ( A_ : int, A_ : List[Any], A_ : Optional[Any] ):
'''simple docstring'''
assert PokerHand(A_ ).compare_with(PokerHand(A_ ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''', generate_random_hands() )
def snake_case_ ( A_ : int, A_ : Optional[int], A_ : Optional[Any] ):
'''simple docstring'''
assert PokerHand(A_ ).compare_with(PokerHand(A_ ) ) == expected
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = [PokerHand(A_ ) for hand in SORTED_HANDS]
_lowerCamelCase : List[str] = poker_hands.copy()
shuffle(A_ )
_lowerCamelCase : List[Any] = chain(sorted(A_ ) )
for index, hand in enumerate(A_ ):
assert hand == poker_hands[index]
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=A_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = PokerHand('''2C 4S AS 3D 5C''' )
_lowerCamelCase : str = True
_lowerCamelCase : List[str] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = 0
_lowerCamelCase : List[Any] = os.path.abspath(os.path.dirname(A_ ) )
_lowerCamelCase : Dict = os.path.join(A_, '''poker_hands.txt''' )
with open(A_ ) as file_hand:
for line in file_hand:
_lowerCamelCase : List[Any] = line[:14].strip()
_lowerCamelCase : str = line[15:].strip()
_lowerCamelCase , _lowerCamelCase : Dict = PokerHand(A_ ), PokerHand(A_ )
_lowerCamelCase : Union[str, Any] = player.compare_with(A_ )
if output == "Win":
answer += 1
assert answer == 3_76
| 72 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def snake_case_ ( A_ : Any ):
'''simple docstring'''
_lowerCamelCase : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ )
_lowerCamelCase : str = emb.weight.data
return lin_layer
def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model''']
remove_ignore_keys_(A_ )
_lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ )
if mbart_aa and finetuned:
_lowerCamelCase : Any = '''relu'''
_lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight''']
_lowerCamelCase : Any = MBartForConditionalGeneration(A_ )
model.model.load_state_dict(A_ )
if finetuned:
_lowerCamelCase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on the local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 72 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
snake_case__ : Union[str, Any] = ["torch", "scipy"]
def __init__( self : List[Any] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = current_set.copy()
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
if len(A_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_lowerCamelCase : Dict = len(A_ ) + 1
if any(len(A_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(A_, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(A_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowerCamelCase : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
_lowerCamelCase : str = data_set.copy()
_lowerCamelCase : List[Any] = []
for row_index, row in enumerate(A_ ):
if 0 not in row:
_lowerCamelCase : Union[str, Any] = data_set.pop(A_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, A_ )
_lowerCamelCase : List[str] = data_set.copy()
_lowerCamelCase : int = simplify(A_ )
_lowerCamelCase : int = simplified[::-1]
_lowerCamelCase : list = []
for row in simplified:
_lowerCamelCase : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A_ ) == 0:
solutions.append(0 )
continue
_lowerCamelCase : Tuple = temp_row[1::]
_lowerCamelCase : Tuple = temp_row[::-1]
for column_index, column in enumerate(A_ ):
current_solution -= column * solutions[column_index]
solutions.append(A_ )
_lowerCamelCase : Optional[int] = []
for item in solutions:
final.append(float(round(A_, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
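# Editor's cross-check on the same coefficients as the example system above (numpy is an
# added dependency used only for this sketch): the unique solution is [-1, 0, 1, 2, 3],
# which is what the solver above is intended to print.
import numpy as np

rows = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
coefficients = np.array([row[:-1] for row in rows], dtype=float)
constants = np.array([row[-1] for row in rows], dtype=float)
print(np.linalg.solve(coefficients, constants))  # ~ [-1.  0.  1.  2.  3.]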
| 72 | 1 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCAmelCase__ = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : bool , __lowerCAmelCase : str = None , __lowerCAmelCase : list = None ):
"""simple docstring"""
_lowerCamelCase : str = None
_lowerCamelCase : Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
_lowerCamelCase : List[Any] = os.path.abspath('''examples''' )
for item in os.listdir(__lowerCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
_lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=__lowerCAmelCase , feature_script=__lowerCAmelCase , tested_section='''main()''' if parser_only else '''training_function()''' , ):
_lowerCamelCase : Tuple = compare_against_test(
os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = '''\n'''.join(__lowerCAmelCase )
if special_strings is not None:
for string in special_strings:
_lowerCamelCase : Dict = diff.replace(__lowerCAmelCase , '''''' )
self.assertEqual(__lowerCAmelCase , '''''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.one_complete_example('''complete_nlp_example.py''' , __lowerCAmelCase )
self.one_complete_example('''complete_nlp_example.py''' , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
_lowerCamelCase : List[Any] = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.one_complete_example('''complete_cv_example.py''' , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"})
class __snake_case ( _lowercase):
snake_case__ : Tuple = False
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] ):
"""simple docstring"""
super().setUpClass()
_lowerCamelCase : Tuple = tempfile.mkdtemp()
_lowerCamelCase : List[Any] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_lowerCamelCase : int = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
_lowerCamelCase : Any = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
_lowerCamelCase : List[str] = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
self.assertNotIn('''epoch 0:''' , __lowerCAmelCase )
self.assertIn('''epoch 1:''' , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
_lowerCamelCase : int = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
if torch.cuda.is_available():
_lowerCamelCase : str = torch.cuda.device_count()
else:
_lowerCamelCase : List[str] = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __lowerCAmelCase )
self.assertIn('''epoch 1:''' , __lowerCAmelCase )
else:
self.assertIn('''epoch 0:''' , __lowerCAmelCase )
self.assertIn('''epoch 1:''' , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : List[Any] = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
_lowerCamelCase : List[str] = run_command(self._launch_args + testargs , return_stdout=__lowerCAmelCase )
_lowerCamelCase : int = re.findall('''({.+})''' , __lowerCAmelCase )
_lowerCamelCase : List[str] = [r for r in results if '''accuracy''' in r][-1]
_lowerCamelCase : Dict = ast.literal_eval(__lowerCAmelCase )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
_lowerCamelCase : Tuple = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__lowerCAmelCase , '''tracking''' ) ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 72 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "Speech2TextFeatureExtractor"
snake_case__ : Union[str, Any] = "Speech2TextTokenizer"
def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : str = False
def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_lowerCamelCase : str = kwargs.pop('''raw_speech''' )
else:
_lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : List[Any] = args[0]
_lowerCamelCase : int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
_lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase : List[str] = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@contextmanager
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.)''' )
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Any = self.tokenizer
yield
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : Tuple = False
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : Optional[Any], A_ : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = [0 for i in range(r + 1 )]
# nc0 = 1
_lowerCamelCase : List[str] = 1
for i in range(1, n + 1 ):
# to compute current row from previous row.
_lowerCamelCase : List[Any] = min(A_, A_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
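# Editor's note as runnable code: the routine above builds "n choose r" via Pascal's-triangle
# row updates, so the printed value for n=10, r=5 should be 252 (math.comb is used here only
# as an independent check).
from math import comb

assert comb(10, 5) == 252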
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter image url: ''').strip()
print(F"""Downloading image from {url} ...""")
lowerCAmelCase__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCAmelCase__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCAmelCase__ = requests.get(image_url).content
lowerCAmelCase__ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : int ):
'''simple docstring'''
if not isinstance(A_, A_ ):
_lowerCamelCase : Any = F'''Input value of [number={number}] must be an integer'''
raise TypeError(A_ )
if number < 1:
_lowerCamelCase : Tuple = F'''Input value of [number={number}] must be > 0'''
raise ValueError(A_ )
_lowerCamelCase : List[str] = 1
for i in range(1, A_ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
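# Editor's sketch of the recurrence used above, C(i) = C(i-1) * (4*i - 2) // (i + 1)
# (catalan_sketch is a hypothetical name): the first Catalan numbers are 1, 1, 2, 5, 14, 42.
def catalan_sketch(number: int) -> int:
    current = 1
    for i in range(1, number):
        current = current * (4 * i - 2) // (i + 1)
    return current

assert [catalan_sketch(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]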
| 72 |
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case_ ( A_ : float = 0.1 ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 3
_lowerCamelCase : List[str] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ):
primes += is_prime(A_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
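# Editor's sanity check of the 6k +/- 1 primality test defined above, rewritten
# self-contained (is_prime_sketch is a hypothetical name):
import math

def is_prime_sketch(number: int) -> bool:
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if is_prime_sketch(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]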
| 72 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowerCAmelCase__ = random.Random()
def snake_case_ ( A_ : Tuple, A_ : List[str]=1.0, A_ : Optional[int]=None, A_ : List[str]=None ):
'''simple docstring'''
if rng is None:
_lowerCamelCase : Union[str, Any] = global_rng
_lowerCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __snake_case ( unittest.TestCase):
def __init__( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : str=4_0_0 , __lowerCAmelCase : Optional[Any]=2_0_0_0 , __lowerCAmelCase : str=1 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Dict=1_6_0_0_0 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : str=True , ):
"""simple docstring"""
_lowerCamelCase : int = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : str = min_seq_length
_lowerCamelCase : List[str] = max_seq_length
_lowerCamelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : List[Any] = feature_size
_lowerCamelCase : Dict = padding_value
_lowerCamelCase : int = sampling_rate
_lowerCamelCase : int = return_attention_mask
_lowerCamelCase : List[Any] = do_normalize
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Tuple=False ):
"""simple docstring"""
def _flatten(__lowerCAmelCase : int ):
return list(itertools.chain(*__lowerCAmelCase ) )
if equal_length:
_lowerCamelCase : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCamelCase : Optional[Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase : Union[str, Any] = [np.asarray(__lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Tuple = WavaVecaFeatureExtractor
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Any = WavaVecaFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(__lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : int = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
_lowerCamelCase : List[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
_lowerCamelCase : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# Test batched
_lowerCamelCase : List[Any] = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
_lowerCamelCase : Optional[Any] = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase : List[str] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase : Optional[int] = np.asarray(__lowerCAmelCase )
_lowerCamelCase : Tuple = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
_lowerCamelCase : Any = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : List[str] = ['''longest''', '''max_length''', '''do_not_pad''']
_lowerCamelCase : Tuple = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : str = feat_extract(__lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='''np''' )
_lowerCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Dict = range(8_0_0 , 1_4_0_0 , 2_0_0 )
_lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in lengths]
_lowerCamelCase : Optional[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
_lowerCamelCase : str = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : Tuple = feat_extract(__lowerCAmelCase , max_length=__lowerCAmelCase , padding=__lowerCAmelCase )
_lowerCamelCase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : str = feat_extract(
__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
_lowerCamelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : Union[str, Any] = feat_extract(
__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
_lowerCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
_lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase : List[str] = feat_extract(
__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
_lowerCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
import torch
_lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : Tuple = np.random.rand(1_0_0 ).astype(np.floataa )
_lowerCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase : List[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCamelCase : List[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_lowerCamelCase : Optional[Any] = WavaVecaConfig.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 72 |
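The padding tests above exercise the Wav2Vec2 feature extractor's public call signature (raw float audio in, padded `input_values` out). A minimal usage sketch follows; the checkpoint name and the random audio are illustration-only assumptions, not values taken from the snippet.

# Illustrative sketch of the call pattern exercised above; checkpoint name and audio are assumed.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# Two mono clips of different lengths, both sampled at 16 kHz.
raw_speech = [np.random.randn(800).astype(np.float32), np.random.randn(1200).astype(np.float32)]

batch = feature_extractor(raw_speech, sampling_rate=16_000, padding="longest", return_tensors="np")
print(batch.input_values.shape)  # (2, 1200): the shorter clip is zero-padded after normalization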
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : int = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase )
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
_lowerCamelCase : int = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
_lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
_lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = -1
_lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n"
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 72 | 1 |
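Outside the test harness, the iterator-based streaming pattern verified above is used the same way; the sketch below reuses the tiny test checkpoint, so the generated text is meaningless but the control flow is representative.

# Sketch of the TextIteratorStreamer pattern shown in the tests above.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# generate() blocks, so it runs in a background thread while the main thread consumes the stream.
generation_kwargs = {**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

for new_text in streamer:
    print(new_text, end="", flush=True)
thread.join()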
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 72 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
snake_case__ : int = "retribert"
def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : int = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = share_encoders
_lowerCamelCase : Optional[Any] = projection_dim
| 72 | 1 |
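A quick instantiation sketch for the configuration class defined above; it assumes a transformers release that still ships the deprecated RetriBERT model family.

# Sketch only: RetriBERT is deprecated, so this import requires a version that still bundles it.
from transformers import RetriBertConfig

config = RetriBertConfig(hidden_size=256, num_hidden_layers=4, projection_dim=64)
print(config.share_encoders, config.projection_dim)  # True 64 (share_encoders defaults to True)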
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def snake_case_ ( A_ : Optional[Any], A_ : int ):
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def snake_case_ ( A_ : Tuple ):
'''simple docstring'''
config.addinivalue_line('''markers''', '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=A_ )
def snake_case_ ( A_ : List[str], A_ : List[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.getbasetemp() / '''cache'''
_lowerCamelCase : Tuple = test_hf_cache_home / '''datasets'''
_lowerCamelCase : List[str] = test_hf_cache_home / '''metrics'''
_lowerCamelCase : Optional[int] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''', str(A_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''', str(A_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''', str(A_ ) )
_lowerCamelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''', str(A_ ) )
_lowerCamelCase : Optional[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(A_ ) )
@pytest.fixture(autouse=A_, scope='''session''' )
def snake_case_ ( ):
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=A_ )
def snake_case_ ( A_ : Tuple ):
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''', A_ )
@pytest.fixture
def snake_case_ ( A_ : str ):
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''', A_ )
| 72 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72 | 1 |
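Every case above encodes the same rule: a checkpoint is safetensors-compatible when each PyTorch `.bin` weight has a `.safetensors` counterpart in the same folder, with the `pytorch_model` prefix mapped to `model` and any variant infix such as `fp16` left in place. The helper below is a simplified illustration of that rule written for this document; it is not the diffusers implementation and deliberately ignores the `variant` argument.

# Simplified sketch of the compatibility rule the tests above exercise; not the diffusers code.
def sketch_is_safetensors_compatible(filenames: list) -> bool:
    available = set(filenames)
    for name in filenames:
        if not name.endswith(".bin"):
            continue
        folder, _, filename = name.rpartition("/")
        stem_parts = filename[: -len(".bin")].split(".")  # e.g. ["pytorch_model", "fp16"]
        if stem_parts[0] == "pytorch_model":
            stem_parts[0] = "model"  # transformers-style weights use a different safetensors stem
        expected = ".".join(stem_parts) + ".safetensors"
        if folder:
            expected = f"{folder}/{expected}"
        if expected not in available:
            return False
    return True

assert sketch_is_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
assert not sketch_is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])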
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
_lowerCamelCase : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
_lowerCamelCase : Any = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
_lowerCamelCase : List[Any] = [sys.executable] + distributed_args
execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
| 72 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
    from transformers import MaskFormerImageProcessor

    from PIL import Image
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = min_size
_lowerCamelCase : Any = max_size
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = output.encoder_hidden_states
_lowerCamelCase : Tuple = output.pixel_decoder_hidden_states
_lowerCamelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case__ : Any = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MaskFormerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCamelCase : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(),
}
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1E-4
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : int = model(**__lowerCAmelCase )
_lowerCamelCase : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : List[str] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : str = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[str] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase )
_lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']]
_lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 72 | 1 |
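For reference, plain inference with the same checkpoint outside the test suite only needs the calls the integration tests already demonstrate; the image path below is a placeholder assumption.

# Sketch of direct inference with the checkpoint used in the integration tests above.
# "cats.png" is a hypothetical local file, not something referenced by the original snippet.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("cats.png")
inputs = processor(image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# One mask and one class distribution per query, matching the shapes asserted in the tests.
print(outputs.masks_queries_logits.shape)   # (1, num_queries, height / 4, width / 4)
print(outputs.class_queries_logits.shape)   # (1, num_queries, num_labels + 1)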
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __snake_case :
def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=9_9 , __lowerCAmelCase : Dict=[1, 1, 2] , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Dict=3_2 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : List[str]=8 , __lowerCAmelCase : Tuple=3_7 , __lowerCAmelCase : List[Any]="gelu_new" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : List[Any]=5_1_2 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : int=3 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[int]=False , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Dict = seq_length
_lowerCamelCase : int = is_training
_lowerCamelCase : Dict = use_input_mask
_lowerCamelCase : Dict = use_token_type_ids
_lowerCamelCase : Tuple = use_labels
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Optional[Any] = block_sizes
_lowerCamelCase : List[Any] = num_decoder_layers
_lowerCamelCase : List[Any] = d_model
_lowerCamelCase : Any = n_head
_lowerCamelCase : List[str] = d_head
_lowerCamelCase : Optional[int] = d_inner
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : List[str] = hidden_dropout
_lowerCamelCase : Any = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : Dict = type_vocab_size
_lowerCamelCase : Tuple = 2
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : Tuple = num_choices
_lowerCamelCase : int = scope
_lowerCamelCase : Optional[int] = initializer_std
# Used in the tests to check the size of the first attention layer
_lowerCamelCase : Optional[Any] = n_head
# Used in the tests to check the size of the first hidden state
_lowerCamelCase : Optional[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_lowerCamelCase : Any = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_lowerCamelCase : List[str] = self.num_hidden_layers + 2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : str = None
if self.use_input_mask:
_lowerCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Optional[Any] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : int = TFFunnelModel(config=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : List[Any] = [input_ids, input_mask]
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = TFFunnelModel(config=__lowerCAmelCase )
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_lowerCamelCase : Tuple = False
_lowerCamelCase : Tuple = TFFunnelModel(config=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
_lowerCamelCase : Tuple = TFFunnelBaseModel(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Any = [input_ids, input_mask]
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
_lowerCamelCase : Any = False
_lowerCamelCase : Optional[Any] = TFFunnelBaseModel(config=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
_lowerCamelCase : Any = False
_lowerCamelCase : List[str] = TFFunnelBaseModel(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
_lowerCamelCase : Tuple = TFFunnelForPreTraining(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
_lowerCamelCase : str = TFFunnelForMaskedLM(config=__lowerCAmelCase )
_lowerCamelCase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : Optional[Any] = TFFunnelForSequenceClassification(config=__lowerCAmelCase )
_lowerCamelCase : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Any = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : str = self.num_choices
_lowerCamelCase : Any = TFFunnelForMultipleChoice(config=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : str = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : List[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.num_labels
_lowerCamelCase : Optional[int] = TFFunnelForTokenClassification(config=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
_lowerCamelCase : Dict = TFFunnelForQuestionAnswering(config=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Any = self.prepare_config_and_inputs()
        (
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
        ) = config_and_inputs
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ : str = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ : Union[str, Any] = False
snake_case__ : Any = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = TFFunnelModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self , config_class=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
@require_tf
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Optional[int] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ : Union[str, Any] = False
snake_case__ : int = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = TFFunnelModelTester(self , base=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
| 72 |
"""simple docstring"""
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def snake_case_ ( A_ : dict, A_ : int, A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[str] = set()
# keep track of all the paths to be checked
_lowerCamelCase : str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_lowerCamelCase : str = queue.pop(0 )
# get the last node from the path
_lowerCamelCase : List[Any] = path[-1]
if node not in explored:
_lowerCamelCase : Union[str, Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_lowerCamelCase : Union[str, Any] = list(A_ )
new_path.append(A_ )
queue.append(A_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A_ )
# in case there's no path between the 2 nodes
return []
def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_lowerCamelCase : Optional[int] = [start]
_lowerCamelCase : int = set(A_ )
    # Keep track of distances from the `start` node.
_lowerCamelCase : int = {start: 0, target: -1}
while queue:
_lowerCamelCase : Optional[Any] = queue.pop(0 )
if node == target:
_lowerCamelCase : Any = (
dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A_ )
queue.append(A_ )
_lowerCamelCase : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : list[int], A_ : list[int] ):
'''simple docstring'''
if not len(A_ ) == len(A_ ) == 3:
raise ValueError('''Please enter a valid equation.''' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('''Both a & b of two equations can\'t be zero.''' )
# Extract the coefficients
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = equationa
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = equationa
# Calculate the determinants of the matrices
_lowerCamelCase : Any = aa * ba - aa * ba
_lowerCamelCase : str = ca * ba - ca * ba
_lowerCamelCase : Any = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('''Infinite solutions. (Consistent system)''' )
else:
raise ValueError('''No solution. (Inconsistent system)''' )
else:
if determinant_x == determinant_y == 0:
# Unique trivial solution x = y = 0 (consistent system)
return (0.0, 0.0)
else:
_lowerCamelCase : str = determinant_x / determinant
_lowerCamelCase : Optional[int] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 72 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : List[str] = LxmertForPreTraining(A_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A_, A_, A_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 72 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Dict ):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase):
snake_case__ : Optional[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_lowerCamelCase : Optional[Any] = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = vqa_pipeline(__lowerCAmelCase , top_k=1 )
self.assertEqual(
__lowerCAmelCase , [
[{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}],
[{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}],
] , )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_lowerCamelCase : Optional[int] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_lowerCamelCase : Tuple = '''How many cats are there?'''
_lowerCamelCase : Union[str, Any] = vqa_pipeline(image=__lowerCAmelCase , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}, {'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}] )
_lowerCamelCase : Dict = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [{'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}, {'''score''': ANY(__lowerCAmelCase ), '''answer''': ANY(__lowerCAmelCase )}] )
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_lowerCamelCase : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_lowerCamelCase : str = '''How many cats are there?'''
_lowerCamelCase : List[str] = vqa_pipeline(image=__lowerCAmelCase , question=__lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 0.2_96, '''answer''': '''1'''}] )
_lowerCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 0.2_96, '''answer''': '''1'''}] )
_lowerCamelCase : str = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=4 ) , [[{'''score''': 0.87_99, '''answer''': '''2'''}, {'''score''': 0.2_96, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
| 72 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841
_lowerCamelCase : List[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_lowerCamelCase : Any = defaultdict(A_ )
for nodea, nodea, cost in edges:
adjacency[nodea].append([nodea, cost] )
adjacency[nodea].append([nodea, cost] )
_lowerCamelCase : List[str] = mst(A_ )
_lowerCamelCase : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_lowerCamelCase : int = tuple(answer[:2] )
_lowerCamelCase : int = tuple(edge[::-1] )
assert edge in result or reverse in result
| 72 | 1 |
"""simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase__ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Any = find_backend(''' if not is_torch_available():''' )
self.assertEqual(__lowerCAmelCase , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_lowerCamelCase : Tuple = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(__lowerCAmelCase , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_lowerCamelCase : Any = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(__lowerCAmelCase , '''torch_and_transformers_and_onnx''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , __lowerCAmelCase )
self.assertIn('''torch_and_transformers''' , __lowerCAmelCase )
self.assertIn('''flax_and_transformers''' , __lowerCAmelCase )
self.assertIn('''torch_and_transformers_and_onnx''' , __lowerCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(__lowerCAmelCase , '''\nCONSTANT = None\n''' )
_lowerCamelCase : Any = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
__lowerCAmelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
_lowerCamelCase : Optional[Any] = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
_lowerCamelCase : Optional[int] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
_lowerCamelCase : Dict = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , __lowerCAmelCase )
| 72 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __snake_case ( _lowercase):
snake_case__ : Any = VOCAB_FILES_NAMES
snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
snake_case__ : Any = BartTokenizer
def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = '''post_processor'''
_lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
if tokenizer_component_instance:
_lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase : int = tuple(state['''cls'''] )
_lowerCamelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = add_prefix_space
_lowerCamelCase : Optional[Any] = True
if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets:
_lowerCamelCase : Any = trim_offsets
_lowerCamelCase : str = True
if changes_to_apply:
_lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) )
_lowerCamelCase : str = component_class(**__lowerCAmelCase )
setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value
_lowerCamelCase : str = value
def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : int ):
'''simple docstring'''
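# Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1,
# i.e. each term equals the product of all previous terms plus one.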
assert isinstance(A_, A_ ), F'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
_lowerCamelCase : List[Any] = F'''The input value of [n={number}] has to be > 0'''
raise ValueError(A_ )
else:
_lowerCamelCase : Dict = sylvester(number - 1 )
_lowerCamelCase : Dict = num - 1
_lowerCamelCase : Optional[int] = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 72 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : str ):
'''simple docstring'''
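# ord("a") == 97, so subtracting 96 maps "a" -> 1, ..., "z" -> 26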
return [ord(A_ ) - 96 for elem in plain]
def snake_case_ ( A_ : list[int] ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''', A_ )
print('''Decoded:''', decode(A_ ) )
if __name__ == "__main__":
main()
| 72 | 1 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {'''UserAgent''': UserAgent().random}
def snake_case_ ( A_ : str ):
'''simple docstring'''
_lowerCamelCase : Tuple = script.contents[0]
_lowerCamelCase : Optional[Any] = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __snake_case :
def __init__( self : str , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = f'''https://www.instagram.com/{username}/'''
_lowerCamelCase : Optional[int] = self.get_json()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : str = requests.get(self.url , headers=__lowerCAmelCase ).text
_lowerCamelCase : List[str] = BeautifulSoup(__lowerCAmelCase , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ):
"""simple docstring"""
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : Any ):
"""simple docstring"""
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.user_data["username"]
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return self.user_data["full_name"]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return self.user_data["biography"]
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return self.user_data["business_email"]
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return self.user_data["external_url"]
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return self.user_data["is_verified"]
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return self.user_data["is_private"]
def snake_case_ ( A_ : str = "github" ):
'''simple docstring'''
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
_lowerCamelCase : List[Any] = InstagramUser(A_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data, A_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = InstagramUser('''github''')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCAmelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
lowerCAmelCase__ = F"""https://www.google.com/search?q={query}&num=100"""
lowerCAmelCase__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
lowerCAmelCase__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
lowerCAmelCase__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(A_ ):
if len(A_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(A_ ) )
return data_lists
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for dlist, weight in zip(A_, A_ ):
_lowerCamelCase : Any = min(A_ )
_lowerCamelCase : Optional[Any] = max(A_ )
_lowerCamelCase : list[float] = []
# for weight 0 the score is 1 - the normalised value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCamelCase : str = F'''Invalid weight of {weight:f} provided'''
raise ValueError(A_ )
score_lists.append(A_ )
return score_lists
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(A_ ):
_lowerCamelCase : List[str] = final_scores[j] + ele
return final_scores
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = get_data(A_ )
_lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ )
_lowerCamelCase : str = generate_final_scores(A_ )
# append scores to source data
for i, ele in enumerate(A_ ):
source_data[i].append(A_ )
return source_data
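# Illustrative check (hypothetical numbers, assuming the helpers above keep their behaviour):
# with source_data = [[20, 60], [25, 90]] and weights = [0, 0], the per-attribute scores are
# [1.0, 0.0] and [1.0, 0.0], so the rows become [[20, 60, 2.0], [25, 90, 0.0]].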
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : str, A_ : str ):
'''simple docstring'''
if not (isinstance(A_, A_ ) and isinstance(A_, A_ )):
raise ValueError('''longest_common_substring() takes two strings for inputs''' )
_lowerCamelCase : List[Any] = len(A_ )
_lowerCamelCase : Optional[Any] = len(A_ )
_lowerCamelCase : Tuple = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
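# dp[i][j] holds the length of the longest common suffix of the first i characters of one
# string and the first j characters of the other; the largest cell gives the answer.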
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Tuple = 0
for i in range(1, texta_length + 1 ):
for j in range(1, texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
_lowerCamelCase : Any = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
_lowerCamelCase : Optional[int] = i
_lowerCamelCase : Optional[Any] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "unispeech"
def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = feat_extract_norm
_lowerCamelCase : List[Any] = feat_extract_activation
_lowerCamelCase : Any = list(__lowerCAmelCase )
_lowerCamelCase : Tuple = list(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : List[str] = conv_bias
_lowerCamelCase : List[str] = num_conv_pos_embeddings
_lowerCamelCase : Tuple = num_conv_pos_embedding_groups
_lowerCamelCase : List[str] = len(self.conv_dim )
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : Optional[Any] = feat_proj_dropout
_lowerCamelCase : Optional[int] = final_dropout
_lowerCamelCase : Any = layerdrop
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : List[str] = num_ctc_classes
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = do_stable_layer_norm
_lowerCamelCase : Tuple = use_weighted_layer_sum
_lowerCamelCase : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Any = apply_spec_augment
_lowerCamelCase : Dict = mask_time_prob
_lowerCamelCase : List[str] = mask_time_length
_lowerCamelCase : Optional[Any] = mask_time_min_masks
_lowerCamelCase : List[str] = mask_feature_prob
_lowerCamelCase : int = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCamelCase : Optional[Any] = num_codevectors_per_group
_lowerCamelCase : int = num_codevector_groups
_lowerCamelCase : List[Any] = contrastive_logits_temperature
_lowerCamelCase : List[str] = feat_quantizer_dropout
_lowerCamelCase : Dict = num_negatives
_lowerCamelCase : Optional[int] = codevector_dim
_lowerCamelCase : List[Any] = proj_codevector_dim
_lowerCamelCase : List[Any] = diversity_loss_weight
# ctc loss
_lowerCamelCase : Union[str, Any] = ctc_loss_reduction
_lowerCamelCase : Any = ctc_zero_infinity
# pretraining loss
_lowerCamelCase : str = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 72 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
def snake_case_ ( A_ : str ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
_lowerCamelCase : Dict = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(A_ ).content
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter Video/IGTV url: ''').strip()
lowerCAmelCase__ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 72 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
_lowerCamelCase : Optional[Any] = quote(A_ )
return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
| 72 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
"""simple docstring"""
import unittest
import numpy as np
def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
if shape_a[0] != shape_b[0]:
_lowerCamelCase : Tuple = (
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(A_ )
if shape_b[1] != shape_c[1]:
_lowerCamelCase : Tuple = (
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(A_ )
_lowerCamelCase : List[str] = pseudo_inv
if a_inv is None:
try:
_lowerCamelCase : Any = np.linalg.inv(A_ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] )
_lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] )
_lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase )
self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : int = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 72 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __snake_case ( _lowercase):
snake_case__ : Optional[int] = "poolformer"
def __init__( self : List[Any] , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Dict=1_6 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=4.0 , __lowerCAmelCase : List[Any]=[2, 2, 6, 2] , __lowerCAmelCase : Union[str, Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , __lowerCAmelCase : Any=[7, 3, 3, 3] , __lowerCAmelCase : Optional[Any]=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[2, 1, 1, 1] , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=1E-5 , __lowerCAmelCase : int=0.02 , **__lowerCAmelCase : List[Any] , ):
"""simple docstring"""
_lowerCamelCase : int = num_channels
_lowerCamelCase : Any = patch_size
_lowerCamelCase : List[str] = stride
_lowerCamelCase : int = padding
_lowerCamelCase : Tuple = pool_size
_lowerCamelCase : List[Any] = hidden_sizes
_lowerCamelCase : Tuple = mlp_ratio
_lowerCamelCase : Union[str, Any] = depths
_lowerCamelCase : Optional[Any] = patch_sizes
_lowerCamelCase : Dict = strides
_lowerCamelCase : Optional[int] = num_encoder_blocks
_lowerCamelCase : Optional[Any] = drop_path_rate
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : int = use_layer_scale
_lowerCamelCase : Optional[Any] = layer_scale_init_value
_lowerCamelCase : Dict = initializer_range
super().__init__(**__lowerCAmelCase )
class __snake_case ( _lowercase):
snake_case__ : Any = version.parse("1.11")
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return 2E-3
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[int], A_ : str ):
'''simple docstring'''
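# Greedy strategy: take the largest denomination that still fits. This is optimal for
# canonical coin systems (such as the Indian currency set below) but is not guaranteed
# to be optimal for arbitrary denomination lists.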
_lowerCamelCase : Tuple = int(A_ )
# Initialize Result
_lowerCamelCase : Dict = []
# Traverse through all denomination
for denomination in reversed(A_ ):
# Find denominations
while int(A_ ) >= int(A_ ):
total_value -= int(A_ )
answer.append(A_ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
lowerCAmelCase__ = []
lowerCAmelCase__ = '''0'''
if (
input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
lowerCAmelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 72 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = value
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
class __snake_case :
def __init__( self : Optional[int] , __lowerCAmelCase : Node ):
"""simple docstring"""
_lowerCamelCase : List[str] = tree
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Node | None ):
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : int ):
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : int = 2_00_00_00 ):
'''simple docstring'''
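# Sieve of Eratosthenes: primality_list[i] stays 0 while i is assumed prime; composites are marked with 1.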
_lowerCamelCase : int = [0 for i in range(n + 1 )]
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 1
for i in range(2, int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i, n + 1, A_ ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = 0
for i in range(A_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : List[str] = KandinskyImgaImgPipeline
snake_case__ : Any = ["prompt", "image_embeds", "negative_image_embeds", "image"]
snake_case__ : str = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
snake_case__ : Optional[int] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case__ : List[Any] = False
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return 1_0_0
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_lowerCamelCase : Tuple = MultilingualCLIP(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = text_encoder.eval()
return text_encoder
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : List[str] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_lowerCamelCase : List[Any] = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.dummy_text_encoder
_lowerCamelCase : Tuple = self.dummy_tokenizer
_lowerCamelCase : Dict = self.dummy_unet
_lowerCamelCase : int = self.dummy_movq
_lowerCamelCase : Any = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_lowerCamelCase : Optional[Any] = DDIMScheduler(**__lowerCAmelCase )
_lowerCamelCase : List[str] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : int=0 ):
"""simple docstring"""
_lowerCamelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
_lowerCamelCase : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase : Dict = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : Dict = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : int = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : Tuple = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = '''cpu'''
_lowerCamelCase : List[Any] = self.get_dummy_components()
_lowerCamelCase : Optional[int] = self.pipeline_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = output.images
_lowerCamelCase : int = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
_lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCamelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCamelCase : List[Any] = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_lowerCamelCase : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_lowerCamelCase : List[Any] = '''A red cartoon frog, 4k'''
_lowerCamelCase : int = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
_lowerCamelCase : Optional[Any] = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
_lowerCamelCase , _lowerCamelCase : List[str] = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_lowerCamelCase : List[Any] = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
_lowerCamelCase : List[str] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 72 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def snake_case_ ( A_ : Any ):
'''simple docstring'''
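# Bookkeeping entries from the fairseq checkpoint that have no counterpart in the Hugging Face model.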
_lowerCamelCase : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ )
_lowerCamelCase : str = emb.weight.data
return lin_layer
def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model''']
remove_ignore_keys_(A_ )
_lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ )
if mbart_aa and finetuned:
_lowerCamelCase : Any = '''relu'''
_lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight''']
_lowerCamelCase : Any = MBartForConditionalGeneration(A_ )
model.model.load_state_dict(A_ )
if finetuned:
_lowerCamelCase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
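# Illustrative invocation (editorial note; the script file name and paths below are placeholders,
# not taken from this file):
#
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt /path/to/output_dir \
#       --hf_config facebook/mbart-large-cc25 --finetuned
#
# The two positional arguments map to ``fairseq_path`` and ``pytorch_dump_folder_path`` above;
# add --mbart_50 when the source checkpoint is an mBART-50 model.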
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : int, A_ : int ):
'''simple docstring'''
if not isinstance(A_, A_ ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(A_, A_ ) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
_lowerCamelCase : Optional[int] = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(A_ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
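# Worked example (editorial; ``fizz_buzz`` is a stand-in name for the function above, and its two
# positional parameters are taken to be (number, iterations)):
#
#   fizz_buzz(1, 15)
#   -> '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '
#
# Every entry is followed by a space, including the final one, because ``out += " "`` runs on
# every loop iteration.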
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = current_set.copy()
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
if len(A_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_lowerCamelCase : Dict = len(A_ ) + 1
if any(len(A_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(A_, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(A_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowerCamelCase : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
_lowerCamelCase : str = data_set.copy()
_lowerCamelCase : List[Any] = []
for row_index, row in enumerate(A_ ):
if 0 not in row:
_lowerCamelCase : Union[str, Any] = data_set.pop(A_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, A_ )
_lowerCamelCase : List[str] = data_set.copy()
_lowerCamelCase : int = simplify(A_ )
_lowerCamelCase : int = simplified[::-1]
_lowerCamelCase : list = []
for row in simplified:
_lowerCamelCase : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A_ ) == 0:
solutions.append(0 )
continue
_lowerCamelCase : Tuple = temp_row[1::]
_lowerCamelCase : Tuple = temp_row[::-1]
for column_index, column in enumerate(A_ ):
current_solution -= column * solutions[column_index]
solutions.append(A_ )
_lowerCamelCase : Optional[int] = []
for item in solutions:
final.append(float(round(A_, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 72 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __snake_case :
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return None
class __snake_case :
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : int ):
"""simple docstring"""
return None
class __snake_case ( unittest.TestCase):
snake_case__ : Optional[Any] = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCAmelCase , '''tf''' , 1_2 , **__lowerCAmelCase )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCAmelCase , '''pt''' , 1_2 , **__lowerCAmelCase )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
from transformers import BertModel
_lowerCamelCase : Optional[int] = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(__lowerCAmelCase ) )
vocab_file.flush()
_lowerCamelCase : str = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__lowerCAmelCase ) ) )
model.save_pretrained(__lowerCAmelCase )
self._test_export(__lowerCAmelCase , '''pt''' , 1_2 , __lowerCAmelCase )
@require_tf
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCamelCase : Dict = self._test_export(__lowerCAmelCase , '''tf''' , 1_2 , **__lowerCAmelCase )
_lowerCamelCase : str = quantize(Path(__lowerCAmelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCAmelCase ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCamelCase : Dict = self._test_export(__lowerCAmelCase , '''pt''' , 1_2 , **__lowerCAmelCase )
_lowerCamelCase : str = quantize(__lowerCAmelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCAmelCase ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=None , **__lowerCAmelCase : int ):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
_lowerCamelCase : List[Any] = Path(__lowerCAmelCase ).joinpath('''model.onnx''' )
                # Remove folder if it exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
return path
except Exception as e:
self.fail(__lowerCAmelCase )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
from transformers import BertModel
_lowerCamelCase : Any = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
_lowerCamelCase : Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__lowerCAmelCase , __lowerCAmelCase , '''pt''' )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
from transformers import TFBertModel
_lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
_lowerCamelCase : Optional[int] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__lowerCAmelCase , __lowerCAmelCase , '''tf''' )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = FeatureExtractionPipeline(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = infer_shapes(__lowerCAmelCase , __lowerCAmelCase )
# Assert all variables are present
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __lowerCAmelCase )
self.assertSequenceEqual(variable_names[3:] , __lowerCAmelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
_lowerCamelCase : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
_lowerCamelCase , _lowerCamelCase : str = ensure_valid_input(FuncContiguousArgs() , __lowerCAmelCase , __lowerCAmelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__lowerCAmelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__lowerCAmelCase ) , set(__lowerCAmelCase ) )
        # Parameters should be reordered according to their respective places in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__lowerCAmelCase , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with other args (for instance the parameter "past" in GPT2)
_lowerCamelCase , _lowerCamelCase : Optional[Any] = ensure_valid_input(FuncNonContiguousArgs() , __lowerCAmelCase , __lowerCAmelCase )
        # Should have exactly one arg (everything before the argument "some_other_args" that was not provided)
self.assertEqual(len(__lowerCAmelCase ) , 1 )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 72 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "Speech2TextFeatureExtractor"
snake_case__ : Union[str, Any] = "Speech2TextTokenizer"
def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : str = False
def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_lowerCamelCase : str = kwargs.pop('''raw_speech''' )
else:
_lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : List[Any] = args[0]
_lowerCamelCase : int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
_lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase : List[str] = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@contextmanager
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call).''' )
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Any = self.tokenizer
yield
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : Tuple = False
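# Minimal usage sketch (editorial). The checkpoint name and dummy waveform are assumptions for
# illustration, not taken from this file; transformers registers the class above as
# ``Speech2TextProcessor``.
#
#   import numpy as np
#   from transformers import Speech2TextProcessor
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
#   inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
#   # ``inputs`` holds the extracted audio features; when both ``audio`` and ``text`` are passed,
#   # ``__call__`` above also tokenizes the text so its input ids can serve as labels.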
| 72 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Any = KandinskyVaaImgaImgPipeline
snake_case__ : int = ["image_embeds", "negative_image_embeds", "image"]
snake_case__ : Optional[int] = [
"image_embeds",
"negative_image_embeds",
"image",
]
snake_case__ : Tuple = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case__ : Any = False
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return 1_0_0
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : List[str] = {
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_lowerCamelCase : List[Any] = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.dummy_unet
_lowerCamelCase : Union[str, Any] = self.dummy_movq
_lowerCamelCase : Any = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_lowerCamelCase : Optional[Any] = DDIMScheduler(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=0 ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCAmelCase )
# create init_image
_lowerCamelCase : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase : int = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : Any = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : int = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = '''cpu'''
_lowerCamelCase : Dict = self.get_dummy_components()
_lowerCamelCase : Dict = self.pipeline_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = output.images
_lowerCamelCase : str = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCamelCase : Optional[int] = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
_lowerCamelCase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_lowerCamelCase : int = '''A red cartoon frog, 4k'''
_lowerCamelCase : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
_lowerCamelCase : Tuple = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
_lowerCamelCase , _lowerCamelCase : Any = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_lowerCamelCase : Union[str, Any] = pipeline(
image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
_lowerCamelCase : int = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
import unittest
import numpy as np
def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
if shape_a[0] != shape_b[0]:
_lowerCamelCase : Tuple = (
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(A_ )
if shape_b[1] != shape_c[1]:
_lowerCamelCase : Tuple = (
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(A_ )
_lowerCamelCase : List[str] = pseudo_inv
if a_inv is None:
try:
_lowerCamelCase : Any = np.linalg.inv(A_ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] )
_lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] )
_lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase )
self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : int = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : str ):
'''simple docstring'''
return [ord(A_ ) - 96 for elem in plain]
def snake_case_ ( A_ : list[int] ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''', A_ )
print('''Decoded:''', decode(A_ ) )
if __name__ == "__main__":
main()
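# Worked example (editorial; ``encode`` and ``decode`` are the names used inside ``main`` for the
# two helpers defined above):
#
#   encode("hello")            -> [8, 5, 12, 12, 15]
#   decode([8, 5, 12, 12, 15]) -> 'hello'
#
# Each lowercase letter maps to its 1-based position in the alphabet via ``ord(ch) - 96``, and
# ``decode`` inverts that with ``chr(elem + 96)``.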
| 72 |
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case_ ( A_ : float = 0.1 ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 3
_lowerCamelCase : List[str] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ):
primes += is_prime(A_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
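# Context note (editorial): this mirrors the classic "spiral primes" problem. Starting from the
# 3x3 ring (j = 3, with the three primes 3, 5 and 7 pre-counted), each pass adds the corner values
# of the next ring -- j*j + j + 1, j*j + 2*j + 2 and j*j + 3*j + 3; the fourth corner (j + 2)**2
# is a perfect square and never prime -- and stops once the fraction of primes among the 2*j - 1
# diagonal numbers drops below ``ratio``. Assuming the inner loop tests each corner value ``i``
# with the primality helper defined above, a ratio of 0.5 yields a side length of 11.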
| 72 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : int = 2_00_00_00 ):
'''simple docstring'''
_lowerCamelCase : int = [0 for i in range(n + 1 )]
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 1
for i in range(2, int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i, n + 1, A_ ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = 0
for i in range(A_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : int = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase )
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
_lowerCamelCase : int = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
_lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
_lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = -1
_lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n"
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 72 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "roberta"
def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int]=5_0_2_6_5 , __lowerCAmelCase : List[str]=7_6_8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[Any]=1_2 , __lowerCAmelCase : Union[str, Any]=3_0_7_2 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Dict=5_1_2 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[Any]=1E-12 , __lowerCAmelCase : str=1 , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Tuple="absolute" , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : Any = type_vocab_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[Any] = layer_norm_eps
_lowerCamelCase : int = position_embedding_type
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : Any = classifier_dropout
class __snake_case ( _lowercase):
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCamelCase : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_lowerCamelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 72 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
snake_case__ : int = "retribert"
def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : int = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = share_encoders
_lowerCamelCase : Optional[Any] = projection_dim
| 72 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def snake_case_ ( A_ : Dict, A_ : str, A_ : str, A_ : Path, A_ : str = None, A_ : str = None, A_ : str = None, ):
'''simple docstring'''
if config_name_or_path is None:
_lowerCamelCase : Dict = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
if generator_tokenizer_name_or_path is None:
_lowerCamelCase : Optional[Any] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
_lowerCamelCase : Union[str, Any] = question_encoder_name_or_path
_lowerCamelCase : Optional[int] = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
# Save model.
_lowerCamelCase : Optional[Any] = RagConfig.from_pretrained(A_ )
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(A_ )
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(A_ )
_lowerCamelCase : Optional[int] = gen_config
_lowerCamelCase : List[str] = question_encoder_config
_lowerCamelCase : Union[str, Any] = model_class.from_pretrained_question_encoder_generator(
A_, A_, config=A_ )
rag_model.save_pretrained(A_ )
# Sanity check.
model_class.from_pretrained(A_ )
# Save tokenizers.
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(A_ )
gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(A_ )
question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
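# Illustrative invocation (editorial; the script file name is a placeholder and the model
# identifiers are public Hub checkpoints used as assumptions, not taken from this file):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-consolidated
#
# The tokenizer identifiers default to the generator / question-encoder paths, and the config
# defaults to facebook/rag-sequence-base when model_type is rag_sequence, as handled above.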
| 72 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72 | 1 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def snake_case_ ( *A_ : Optional[Any], A_ : Optional[Union[Dict, Any]] = None, A_ : Optional[int]=True, A_ : str=2 ):
'''simple docstring'''
from .. import __version__
_lowerCamelCase : List[Any] = take_from
_lowerCamelCase : Union[str, Any] = ()
if not isinstance(args[0], A_ ):
_lowerCamelCase : Optional[int] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(A_ ).base_version ) >= version.parse(A_ ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
_lowerCamelCase : str = None
if isinstance(A_, A_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(A_ ),)
_lowerCamelCase : Dict = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(A_, A_ ):
values += (getattr(A_, A_ ),)
_lowerCamelCase : Tuple = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
_lowerCamelCase : Optional[int] = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
_lowerCamelCase : Union[str, Any] = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message, A_, stacklevel=A_ )
if isinstance(A_, A_ ) and len(A_ ) > 0:
_lowerCamelCase : int = inspect.getouterframes(inspect.currentframe() )[1]
_lowerCamelCase : Dict = call_frame.filename
_lowerCamelCase : Union[str, Any] = call_frame.lineno
_lowerCamelCase : Any = call_frame.function
_lowerCamelCase , _lowerCamelCase : List[str] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(A_ ) == 0:
return
elif len(A_ ) == 1:
return values[0]
return values
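# Usage sketch (editorial; the argument values are illustrative). diffusers exposes this helper
# as ``deprecate``:
#
#   kwargs = {"old_arg": 42}
#   value = deprecate("old_arg", "99.0.0", "Use `new_arg` instead.", take_from=kwargs)
#
# Assuming the installed diffusers version is below 99.0.0, this pops "old_arg" from ``kwargs``,
# emits a deprecation warning built from the attribute name, removal version and message, and
# returns 42. Passing one or more (attribute, version, message) tuples works the same way,
# because a single non-tuple first argument is wrapped into a tuple above.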
| 72 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = min_size
_lowerCamelCase : Any = max_size
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = output.encoder_hidden_states
_lowerCamelCase : Tuple = output.pixel_decoder_hidden_states
_lowerCamelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
snake_case__ : Any = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = MaskFormerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCamelCase : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(),
}
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
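# absolute tolerance intended for the element-wise output comparisons in the integration tests below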
lowerCAmelCase__ = 1E-4
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : int = model(**__lowerCAmelCase )
_lowerCamelCase : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : List[str] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : str = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowerCAmelCase )
.eval()
)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[str] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase )
_lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']]
_lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 72 | 1 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowerCAmelCase__ = '''bert-base-cased'''
lowerCAmelCase__ = '''google/pegasus-xsum'''
lowerCAmelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowerCAmelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowerCAmelCase__ = '''patrickvonplaten/t5-tiny-random'''
lowerCAmelCase__ = '''sshleifer/bart-tiny-random'''
lowerCAmelCase__ = '''sshleifer/tiny-mbart'''
lowerCAmelCase__ = '''sshleifer/tiny-marian-en-de'''
def snake_case_ ( A_ : Path, A_ : list ):
'''simple docstring'''
_lowerCamelCase : List[Any] = '''\n'''.join(A_ )
Path(A_ ).open('''w''' ).writelines(A_ )
def snake_case_ ( A_ : Tuple ):
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(A_, F'''{split}.source''' ), A_ )
_dump_articles(os.path.join(A_, F'''{split}.target''' ), A_ )
return tmp_dir
class __snake_case ( _lowercase):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCamelCase : str = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
_lowerCamelCase : List[Any] = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
_lowerCamelCase : Union[str, Any] = 4
_lowerCamelCase : Dict = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_lowerCamelCase , _lowerCamelCase : Tuple = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
_lowerCamelCase : int = SeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path='''train''' , max_source_length=__lowerCAmelCase , max_target_length=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , )
_lowerCamelCase : Any = DataLoader(__lowerCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_lowerCamelCase : List[Any] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCamelCase : List[str] = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
_lowerCamelCase : Any = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
_lowerCamelCase : List[str] = 4
_lowerCamelCase : Tuple = LegacySeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path='''train''' , max_source_length=2_0 , max_target_length=__lowerCAmelCase , )
_lowerCamelCase : Dict = DataLoader(__lowerCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
_lowerCamelCase : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_lowerCamelCase : str = tmp_dir.joinpath('''train.source''' ).open().readlines()
_lowerCamelCase : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__lowerCAmelCase , __lowerCAmelCase , 1_2_8 , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = {x.name for x in tmp_dir.iterdir()}
_lowerCamelCase : Dict = {x.name for x in save_dir.iterdir()}
_lowerCamelCase : Optional[Any] = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__lowerCAmelCase ) < len(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == 1
assert len(packed_examples[0] ) == sum(len(x ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = self._get_dataset(max_len=6_4 )
_lowerCamelCase : Any = 6_4
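# dynamic batching: each batch is filled up to a max_tokens budget, so batch sizes vary instead of being fixed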
_lowerCamelCase : str = ds.make_dynamic_sampler(__lowerCAmelCase , required_batch_size_multiple=__lowerCAmelCase )
_lowerCamelCase : List[str] = [len(x ) for x in batch_sampler]
assert len(set(__lowerCAmelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__lowerCAmelCase ) == len(__lowerCAmelCase ) # no dropped or added examples
_lowerCamelCase : Optional[int] = DataLoader(__lowerCAmelCase , batch_sampler=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : List[Any] = []
for batch in data_loader:
_lowerCamelCase : Union[str, Any] = batch['''input_ids'''].shape
_lowerCamelCase : Optional[Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_lowerCamelCase : str = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(__lowerCAmelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__lowerCAmelCase )
assert num_src_per_batch[0] == max(__lowerCAmelCase )
if failures:
raise AssertionError(f'''too many tokens in {len(__lowerCAmelCase )} batches''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self._get_dataset(max_len=5_1_2 )
_lowerCamelCase : Optional[int] = 2
_lowerCamelCase : List[Any] = ds.make_sortish_sampler(__lowerCAmelCase , shuffle=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCamelCase : Optional[int] = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.pad_token_id
def count_pad_tokens(__lowerCAmelCase : Dict , __lowerCAmelCase : str="input_ids" ):
return [batch[k].eq(__lowerCAmelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__lowerCAmelCase , k='''labels''' ) ) < sum(count_pad_tokens(__lowerCAmelCase , k='''labels''' ) )
assert sum(count_pad_tokens(__lowerCAmelCase ) ) < sum(count_pad_tokens(__lowerCAmelCase ) )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[Any]=1_0_0_0 , __lowerCAmelCase : Dict=1_2_8 ):
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , __lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = '''examples/seq2seq/wmt_en_ro'''
_lowerCamelCase : Dict = max_len * 2 * 6_4
if not Path(__lowerCAmelCase ).joinpath('''train.len''' ).exists():
save_len_file(__lowerCAmelCase , __lowerCAmelCase )
else:
_lowerCamelCase : int = '''examples/seq2seq/test_data/wmt_en_ro'''
_lowerCamelCase : Optional[int] = max_len * 4
save_len_file(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Dict = SeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path='''train''' , max_source_length=__lowerCAmelCase , max_target_length=__lowerCAmelCase , n_obs=__lowerCAmelCase , )
return ds, max_tokens, tokenizer
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self._get_dataset()
_lowerCamelCase : Optional[int] = set(DistributedSortishSampler(__lowerCAmelCase , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=__lowerCAmelCase ) )
_lowerCamelCase : Tuple = set(DistributedSortishSampler(__lowerCAmelCase , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=__lowerCAmelCase ) )
assert idsa.intersection(__lowerCAmelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = AutoTokenizer.from_pretrained(__lowerCAmelCase , use_fast=__lowerCAmelCase )
if tok_name == MBART_TINY:
_lowerCamelCase : str = SeqaSeqDataset(
__lowerCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
_lowerCamelCase : Union[str, Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_lowerCamelCase : str = SeqaSeqDataset(
__lowerCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
_lowerCamelCase : Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__lowerCAmelCase ) == 1 if tok_name == BART_TINY else len(__lowerCAmelCase ) == 0
| 72 |
"""simple docstring"""
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def snake_case_ ( A_ : dict, A_ : int, A_ : int ):
'''simple docstring'''
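# breadth-first search explores the graph level by level, so the first path that reaches the goal uses the fewest edges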
_lowerCamelCase : List[str] = set()
# keep track of all the paths to be checked
_lowerCamelCase : str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_lowerCamelCase : str = queue.pop(0 )
# get the last node from the path
_lowerCamelCase : List[Any] = path[-1]
if node not in explored:
_lowerCamelCase : Union[str, Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_lowerCamelCase : Union[str, Any] = list(A_ )
new_path.append(A_ )
queue.append(A_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A_ )
# in case there's no path between the 2 nodes
return []
def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_lowerCamelCase : Optional[int] = [start]
_lowerCamelCase : int = set(A_ )
# Keep tab on distances from `start` node.
_lowerCamelCase : int = {start: 0, target: -1}
while queue:
_lowerCamelCase : Optional[Any] = queue.pop(0 )
if node == target:
_lowerCamelCase : Any = (
dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A_ )
queue.append(A_ )
_lowerCamelCase : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 72 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def snake_case_ ( A_ : list, A_ : list, A_ : list, A_ : list, A_ : list ):
'''simple docstring'''
_lowerCamelCase : Any = np.array([[1, item, train_mtch[i]] for i, item in enumerate(A_ )] )
_lowerCamelCase : Optional[int] = np.array(A_ )
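# closed-form ordinary least squares: beta = (X^T X)^-1 X^T y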
_lowerCamelCase : List[str] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), A_ ) ), x.transpose() ), A_ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def snake_case_ ( A_ : list, A_ : list, A_ : list ):
'''simple docstring'''
_lowerCamelCase : List[str] = (1, 2, 1)
_lowerCamelCase : Any = (1, 1, 0, 7)
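# seasonal ARIMA with order (1, 2, 1) and weekly seasonal order (1, 1, 0, 7); the match counts are passed as an exogenous regressor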
_lowerCamelCase : int = SARIMAX(
A_, exog=A_, order=A_, seasonal_order=A_ )
_lowerCamelCase : Optional[int] = model.fit(disp=A_, maxiter=6_00, method='''nm''' )
_lowerCamelCase : Any = model_fit.predict(1, len(A_ ), exog=[test_match] )
return result[0]
def snake_case_ ( A_ : list, A_ : list, A_ : list ):
'''simple docstring'''
_lowerCamelCase : Any = SVR(kernel='''rbf''', C=1, gamma=0.1, epsilon=0.1 )
regressor.fit(A_, A_ )
_lowerCamelCase : Optional[Any] = regressor.predict(A_ )
return y_pred[0]
def snake_case_ ( A_ : list ):
'''simple docstring'''
train_user.sort()
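# interquartile-range rule: values below Q1 - 0.1 * IQR are treated as unusually low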
_lowerCamelCase : Dict = np.percentile(A_, 25 )
_lowerCamelCase : Optional[int] = np.percentile(A_, 75 )
_lowerCamelCase : Dict = qa - qa
_lowerCamelCase : Tuple = qa - (iqr * 0.1)
return low_lim
def snake_case_ ( A_ : list, A_ : float ):
'''simple docstring'''
_lowerCamelCase : Any = 0
_lowerCamelCase : Dict = 0
for i in list_vote:
if i > actual_result:
_lowerCamelCase : Optional[Any] = not_safe + 1
else:
if abs(abs(A_ ) - abs(A_ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowerCAmelCase__ = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
lowerCAmelCase__ = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
lowerCAmelCase__ = Normalizer().fit_transform(data_input_df.values)
# split data
lowerCAmelCase__ = normalize_df[:, 2].tolist()
lowerCAmelCase__ = normalize_df[:, 0].tolist()
lowerCAmelCase__ = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowerCAmelCase__ = normalize_df[:, [1, 2]].tolist()
lowerCAmelCase__ = x[: len(x) - 1]
lowerCAmelCase__ = x[len(x) - 1 :]
# for linear regression & sarimax
lowerCAmelCase__ = total_date[: len(total_date) - 1]
lowerCAmelCase__ = total_user[: len(total_user) - 1]
lowerCAmelCase__ = total_match[: len(total_match) - 1]
lowerCAmelCase__ = total_date[len(total_date) - 1 :]
lowerCAmelCase__ = total_user[len(total_user) - 1 :]
lowerCAmelCase__ = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowerCAmelCase__ = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowerCAmelCase__ = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
print(f'''Today\'s data is {lowerCAmelCase__}safe.''')
| 72 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : List[str] = LxmertForPreTraining(A_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A_, A_, A_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 72 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = SwinConfig()
_lowerCamelCase : str = swin_name.split('''_''' )
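# timm checkpoint names look like swin_<size>_patch<p>_window<w>_<resolution>[_in22k]; recover the size, window size and image size from the name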
_lowerCamelCase : List[str] = name_split[1]
_lowerCamelCase : str = int(name_split[4] )
_lowerCamelCase : str = int(name_split[3][-1] )
if model_size == "tiny":
_lowerCamelCase : int = 96
_lowerCamelCase : List[Any] = (2, 2, 6, 2)
_lowerCamelCase : Tuple = (3, 6, 12, 24)
elif model_size == "small":
_lowerCamelCase : Union[str, Any] = 96
_lowerCamelCase : Tuple = (2, 2, 18, 2)
_lowerCamelCase : List[Any] = (3, 6, 12, 24)
elif model_size == "base":
_lowerCamelCase : Union[str, Any] = 1_28
_lowerCamelCase : List[Any] = (2, 2, 18, 2)
_lowerCamelCase : Tuple = (4, 8, 16, 32)
else:
_lowerCamelCase : Union[str, Any] = 1_92
_lowerCamelCase : int = (2, 2, 18, 2)
_lowerCamelCase : List[str] = (6, 12, 24, 48)
if "in22k" in swin_name:
_lowerCamelCase : Optional[int] = 2_18_41
else:
_lowerCamelCase : str = 10_00
_lowerCamelCase : Optional[int] = '''huggingface/label-files'''
_lowerCamelCase : List[str] = '''imagenet-1k-id2label.json'''
_lowerCamelCase : Dict = json.load(open(hf_hub_download(A_, A_, repo_type='''dataset''' ), '''r''' ) )
_lowerCamelCase : List[str] = {int(A_ ): v for k, v in idalabel.items()}
_lowerCamelCase : List[str] = idalabel
_lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : int = img_size
_lowerCamelCase : Dict = num_classes
_lowerCamelCase : List[str] = embed_dim
_lowerCamelCase : Dict = depths
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : str = window_size
return config
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
if "patch_embed.proj" in name:
_lowerCamelCase : List[str] = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase : Any = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
if "layers" in name:
_lowerCamelCase : Dict = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace('''attn.proj''', '''attention.output.dense''' )
if "attn" in name:
_lowerCamelCase : Optional[int] = name.replace('''attn''', '''attention.self''' )
if "norm1" in name:
_lowerCamelCase : Optional[int] = name.replace('''norm1''', '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase : str = name.replace('''norm2''', '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase : str = name.replace('''mlp.fc1''', '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase : Dict = name.replace('''mlp.fc2''', '''output.dense''' )
if name == "norm.weight":
_lowerCamelCase : Optional[int] = '''layernorm.weight'''
if name == "norm.bias":
_lowerCamelCase : int = '''layernorm.bias'''
if "head" in name:
_lowerCamelCase : Optional[Any] = name.replace('''head''', '''classifier''' )
else:
_lowerCamelCase : Tuple = '''swin.''' + name
return name
def snake_case_ ( A_ : Optional[int], A_ : int ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[Any] = orig_state_dict.pop(A_ )
if "mask" in key:
continue
elif "qkv" in key:
_lowerCamelCase : Any = key.split('''.''' )
_lowerCamelCase : Optional[int] = int(key_split[1] )
_lowerCamelCase : List[Any] = int(key_split[3] )
_lowerCamelCase : str = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : Dict = val[
dim : dim * 2, :
]
_lowerCamelCase : List[str] = val[-dim:, :]
else:
_lowerCamelCase : List[Any] = val[
:dim
]
_lowerCamelCase : List[Any] = val[
dim : dim * 2
]
_lowerCamelCase : List[Any] = val[
-dim:
]
else:
_lowerCamelCase : Optional[Any] = val
return orig_state_dict
def snake_case_ ( A_ : Dict, A_ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : int = timm.create_model(A_, pretrained=A_ )
timm_model.eval()
_lowerCamelCase : List[str] = get_swin_config(A_ )
_lowerCamelCase : List[str] = SwinForImageClassification(A_ )
model.eval()
_lowerCamelCase : Union[str, Any] = convert_state_dict(timm_model.state_dict(), A_ )
model.load_state_dict(A_ )
_lowerCamelCase : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase : List[Any] = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''', '''-''' ) ) )
_lowerCamelCase : Optional[int] = Image.open(requests.get(A_, stream=A_ ).raw )
_lowerCamelCase : Tuple = image_processor(images=A_, return_tensors='''pt''' )
_lowerCamelCase : List[str] = timm_model(inputs['''pixel_values'''] )
_lowerCamelCase : str = model(**A_ ).logits
assert torch.allclose(A_, A_, atol=1E-3 )
print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 72 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841
_lowerCamelCase : List[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_lowerCamelCase : Any = defaultdict(A_ )
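# build an undirected adjacency list: every edge is stored in both directions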
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_lowerCamelCase : List[str] = mst(A_ )
_lowerCamelCase : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_lowerCamelCase : int = tuple(answer[:2] )
_lowerCamelCase : int = tuple(edge[::-1] )
assert edge in result or reverse in result
| 72 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
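# lazy import structure: map each submodule to the public names it exposes; optional backends are only added below when they are importable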
lowerCAmelCase__ = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __snake_case ( _lowercase):
snake_case__ : Any = VOCAB_FILES_NAMES
snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
snake_case__ : Any = BartTokenizer
def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = '''post_processor'''
_lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
if tokenizer_component_instance:
_lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase : int = tuple(state['''cls'''] )
_lowerCamelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = add_prefix_space
_lowerCamelCase : Optional[Any] = True
if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets:
_lowerCamelCase : Any = trim_offsets
_lowerCamelCase : str = True
if changes_to_apply:
_lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) )
_lowerCamelCase : str = component_class(**__lowerCAmelCase )
setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value
_lowerCamelCase : str = value
def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 72 | 1 |
"""simple docstring"""
import os
import posixpath
import shutil  # used to move shard files out of the working directory
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __snake_case ( datasets.BuilderConfig):
snake_case__ : Optional[datasets.Features] = None
def snake_case_ ( A_ : "pyspark.sql.DataFrame", A_ : List[int], ):
'''simple docstring'''
import pyspark
def generate_fn():
_lowerCamelCase : Any = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
_lowerCamelCase : List[str] = df_with_partition_id.select('''*''' ).where(F'''part_id = {partition_id}''' ).drop('''part_id''' )
_lowerCamelCase : Optional[Any] = partition_df.collect()
_lowerCamelCase : int = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __snake_case ( _BaseExamplesIterable):
def __init__( self : List[str] , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : List[str]=None , ):
"""simple docstring"""
_lowerCamelCase : int = df
_lowerCamelCase : Union[str, Any] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowerCamelCase : str = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Optional[int] ):
"""simple docstring"""
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : np.random.Generator ):
"""simple docstring"""
_lowerCamelCase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.split_shard_indices_by_worker(__lowerCAmelCase , __lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.partition_order )
class __snake_case ( datasets.DatasetBuilder):
snake_case__ : Dict = SparkConfig
def __init__( self : str , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , **__lowerCAmelCase : str , ):
"""simple docstring"""
import pyspark
_lowerCamelCase : Union[str, Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowerCamelCase : Tuple = df
_lowerCamelCase : Tuple = working_dir
super().__init__(
cache_dir=__lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
def create_cache_and_write_probe(__lowerCAmelCase : Optional[int] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__lowerCAmelCase )
_lowerCamelCase : str = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__lowerCAmelCase , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowerCamelCase : Union[str, Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : datasets.download.download_manager.DownloadManager ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str ):
"""simple docstring"""
import pyspark
def get_arrow_batch_size(__lowerCAmelCase : Union[str, Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
_lowerCamelCase : Optional[int] = self.df.count()
_lowerCamelCase : Optional[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowerCamelCase : Union[str, Any] = (
self.df.limit(__lowerCAmelCase )
.repartition(1 )
.mapInArrow(__lowerCAmelCase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowerCamelCase : Any = approx_bytes_per_row * df_num_rows
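# if the estimated Arrow size of the full DataFrame exceeds max_shard_size, repartition so each partition (and hence each written shard) stays under the limit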
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowerCamelCase : Union[str, Any] = min(__lowerCAmelCase , int(approx_total_size / max_shard_size ) )
_lowerCamelCase : List[str] = self.df.repartition(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int , ):
"""simple docstring"""
import pyspark
_lowerCamelCase : Dict = ParquetWriter if file_format == '''parquet''' else ArrowWriter
_lowerCamelCase : Optional[Any] = os.path.join(self._working_dir , os.path.basename(__lowerCAmelCase ) ) if self._working_dir else fpath
_lowerCamelCase : Tuple = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowerCamelCase : int = self.config.features
_lowerCamelCase : Union[str, Any] = self._writer_batch_size
_lowerCamelCase : Optional[int] = self._fs.storage_options
def write_arrow(__lowerCAmelCase : Optional[int] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowerCamelCase : int = pyspark.TaskContext().taskAttemptId()
_lowerCamelCase : Optional[int] = next(__lowerCAmelCase , __lowerCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Optional[Any] = writer_class(
features=__lowerCAmelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
_lowerCamelCase : Dict = pa.Table.from_batches([first_batch] )
writer.write_table(__lowerCAmelCase )
for batch in it:
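# once the current shard reaches max_shard_size bytes, finalize it, emit its stats, and start a new writer for the next shard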
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
_lowerCamelCase : Dict = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
_lowerCamelCase : int = pa.Table.from_batches([batch] )
writer.write_table(__lowerCAmelCase )
if writer._num_bytes > 0:
_lowerCamelCase , _lowerCamelCase : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowerCAmelCase ) ):
_lowerCamelCase : List[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , os.path.basename(__lowerCAmelCase ) )
shutil.move(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Any = (
self.df.mapInArrow(__lowerCAmelCase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : "datasets.SplitGenerator" , __lowerCAmelCase : str = "arrow" , __lowerCAmelCase : Optional[Union[str, int]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : List[Any] , ):
"""simple docstring"""
self._validate_cache_dir()
_lowerCamelCase : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowerCAmelCase )
_lowerCamelCase : str = not is_remote_filesystem(self._fs )
_lowerCamelCase : List[Any] = os.path.join if is_local else posixpath.join
_lowerCamelCase : Optional[int] = '''-TTTTT-SSSSS-of-NNNNN'''
_lowerCamelCase : List[Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
_lowerCamelCase : List[Any] = path_join(self._output_dir , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : List[str] = []
_lowerCamelCase : Union[str, Any] = []
for task_id, content in self._prepare_split_single(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
            _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowerCAmelCase )
_lowerCamelCase : Tuple = total_num_examples
_lowerCamelCase : Dict = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
_lowerCamelCase : Any = all_shard_lengths
            # Define fs outside of _rename_shard so that the function does not reference self, which
            # would otherwise fail with a pickling error because the SparkContext cannot be pickled.
_lowerCamelCase : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
rename(
__lowerCAmelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , )
_lowerCamelCase : Tuple = []
_lowerCamelCase : Optional[Any] = 0
for i in range(len(__lowerCAmelCase ) ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = task_id_and_num_shards[i]
for shard_id in range(__lowerCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowerCAmelCase , len(__lowerCAmelCase ) ).map(lambda __lowerCAmelCase : _rename_shard(*__lowerCAmelCase ) ).collect()
else:
# don't use any pattern
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : List[str] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(__lowerCAmelCase , '''''' ) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : "datasets.SplitGenerator" , ):
"""simple docstring"""
return SparkExamplesIterable(self.df )
| 72 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : str ):
'''simple docstring'''
    return [ord(elem) - 96 for elem in A_]
def snake_case_ ( A_ : list[int] ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''', A_ )
print('''Decoded:''', decode(A_ ) )
if __name__ == "__main__":
main()
| 72 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "MCTCTFeatureExtractor"
snake_case__ : Optional[int] = "AutoTokenizer"
def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.feature_extractor
_lowerCamelCase : Optional[Any] = False
def __call__( self : Optional[int] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : str ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_lowerCamelCase : str = kwargs.pop('''raw_speech''' )
else:
_lowerCamelCase : str = kwargs.pop('''audio''' , __lowerCAmelCase )
_lowerCamelCase : Dict = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
_lowerCamelCase : List[str] = kwargs.pop('''text''' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : Optional[Any] = args[0]
_lowerCamelCase : str = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_lowerCamelCase : str = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
_lowerCamelCase : Union[str, Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase : Dict = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self : Dict , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Tuple = kwargs.pop('''input_features''' , __lowerCAmelCase )
_lowerCamelCase : int = kwargs.pop('''labels''' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : List[str] = args[0]
_lowerCamelCase : Optional[int] = args[1:]
if input_features is not None:
_lowerCamelCase : int = self.feature_extractor.pad(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase )
if labels is not None:
_lowerCamelCase : Tuple = self.tokenizer.pad(__lowerCAmelCase , **__lowerCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowerCamelCase : Any = labels['''input_ids''']
return input_features
def SCREAMING_SNAKE_CASE ( self : List[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@contextmanager
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call).''' )
_lowerCamelCase : Tuple = True
_lowerCamelCase : str = self.tokenizer
yield
_lowerCamelCase : Union[str, Any] = self.feature_extractor
_lowerCamelCase : Optional[Any] = False
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : List[str] = LxmertForPreTraining(A_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A_, A_, A_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(A_ ):
if len(A_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(A_ ) )
return data_lists
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for dlist, weight in zip(A_, A_ ):
_lowerCamelCase : Any = min(A_ )
_lowerCamelCase : Optional[Any] = max(A_ )
_lowerCamelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCamelCase : str = F'''Invalid weight of {weight:f} provided'''
raise ValueError(A_ )
score_lists.append(A_ )
return score_lists
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(A_ ):
_lowerCamelCase : List[str] = final_scores[j] + ele
return final_scores
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = get_data(A_ )
_lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ )
_lowerCamelCase : str = generate_final_scores(A_ )
# append scores to source data
for i, ele in enumerate(A_ ):
source_data[i].append(A_ )
return source_data
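# Worked example (added for illustration; names are from the original scoring-algorithm script,
# where the last function above is procentual_proximity):
#   procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
#   -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.33...]]
# Each column is min-max normalised (inverted when its weight is 0, kept when it is 1) and the
# per-row sum of those normalised scores is appended as the final column.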
| 72 | 1 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def snake_case_ ( A_ : Union[str, Any], A_ : Dict, A_ : Any, A_ : Optional[int] ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
_lowerCamelCase : List[str] = TOKENIZER_CLASSES
else:
_lowerCamelCase : List[str] = {tokenizer_name: getattr(A_, tokenizer_name + '''Fast''' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
_lowerCamelCase : Optional[int] = TOKENIZER_CLASSES[tokenizer_name]
_lowerCamelCase : List[str] = True
if checkpoint_name is None:
_lowerCamelCase : int = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_lowerCamelCase : List[str] = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
_lowerCamelCase : int = tokenizer_class.from_pretrained(A_, force_download=A_ )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = checkpoint.split('''/''' )
_lowerCamelCase : Dict = os.path.join(A_, A_ )
elif add_prefix:
_lowerCamelCase : List[Any] = checkpoint
_lowerCamelCase : str = dump_path
else:
_lowerCamelCase : str = None
_lowerCamelCase : List[str] = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_lowerCamelCase : int = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_lowerCamelCase : Union[str, Any] = file_path.split(A_ )[-1][0]
if next_char == "/":
_lowerCamelCase : Any = os.path.join(A_, A_ )
_lowerCamelCase : Union[str, Any] = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
_lowerCamelCase : Union[str, Any] = tokenizer.save_pretrained(
A_, legacy_format=A_, filename_prefix=A_ )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(A_ )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 72 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "unispeech"
def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Any = feat_extract_norm
_lowerCamelCase : List[Any] = feat_extract_activation
_lowerCamelCase : Any = list(__lowerCAmelCase )
_lowerCamelCase : Tuple = list(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : List[str] = conv_bias
_lowerCamelCase : List[str] = num_conv_pos_embeddings
_lowerCamelCase : Tuple = num_conv_pos_embedding_groups
_lowerCamelCase : List[str] = len(self.conv_dim )
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Optional[int] = activation_dropout
_lowerCamelCase : Optional[Any] = feat_proj_dropout
_lowerCamelCase : Optional[int] = final_dropout
_lowerCamelCase : Any = layerdrop
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : List[str] = num_ctc_classes
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = do_stable_layer_norm
_lowerCamelCase : Tuple = use_weighted_layer_sum
_lowerCamelCase : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Any = apply_spec_augment
_lowerCamelCase : Dict = mask_time_prob
_lowerCamelCase : List[str] = mask_time_length
_lowerCamelCase : Optional[Any] = mask_time_min_masks
_lowerCamelCase : List[str] = mask_feature_prob
_lowerCamelCase : int = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCamelCase : Optional[Any] = num_codevectors_per_group
_lowerCamelCase : int = num_codevector_groups
_lowerCamelCase : List[Any] = contrastive_logits_temperature
_lowerCamelCase : List[str] = feat_quantizer_dropout
_lowerCamelCase : Dict = num_negatives
_lowerCamelCase : Optional[int] = codevector_dim
_lowerCamelCase : List[Any] = proj_codevector_dim
_lowerCamelCase : List[Any] = diversity_loss_weight
# ctc loss
_lowerCamelCase : Union[str, Any] = ctc_loss_reduction
_lowerCamelCase : Any = ctc_zero_infinity
# pretraining loss
_lowerCamelCase : str = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 72 | 1 |
"""simple docstring"""
from torch import nn
class __snake_case ( nn.Module):
def __init__( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : Optional[Any] = class_size
_lowerCamelCase : Optional[Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
_lowerCamelCase : List[str] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.mlp(__lowerCAmelCase )
return logits
| 72 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
_lowerCamelCase : Optional[Any] = quote(A_ )
return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
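# Illustrative usage (added; repo and file names are hypothetical). The wrapper above appears to be
# the datasets hf_hub_url helper: it url-encodes the path on huggingface_hub < 0.11.0 and returns
# the resolve URL of a file in a dataset repository, e.g.
#   hf_hub_url("squad", "plain_text/train.parquet", revision="main")
#   -> "https://huggingface.co/datasets/squad/resolve/main/plain_text/train.parquet"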
| 72 | 1 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def snake_case_ ( A_ : List[str], A_ : int, A_ : Optional[int]=0 ):
'''simple docstring'''
if name is None:
_lowerCamelCase : List[Any] = None
else:
_lowerCamelCase : List[str] = '''.''' * max(0, spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}'''
_lowerCamelCase : Optional[int] = fmt.format(A_ )
# Print and recurse (if needed).
if isinstance(A_, A_ ):
if msg is not None:
print(A_ )
for k in val.keys():
recursive_print(A_, val[k], spaces + 2 )
elif isinstance(A_, torch.Tensor ):
print(A_, ''':''', val.size() )
else:
print(A_, ''':''', A_ )
def snake_case_ ( A_ : Optional[int], A_ : Union[str, Any], A_ : List[Any], A_ : Optional[Any], A_ : Any ):
'''simple docstring'''
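    # Added note: both branches below permute the fused QKV parameter into a
    # [num_splits, num_heads, hidden_size_per_head, ...] layout (viewed back to the original
    # shape at the end); for weights, the caller then transposes the result once more into the
    # D x 3D layout that GPT-2's c_attn expects.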
_lowerCamelCase : str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_lowerCamelCase : List[Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_lowerCamelCase : List[Any] = param.view(*A_ )
_lowerCamelCase : Any = param.transpose(0, 2 )
_lowerCamelCase : Union[str, Any] = param.transpose(1, 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_lowerCamelCase : Any = (num_heads, num_splits, hidden_size) + input_shape[1:]
_lowerCamelCase : Optional[Any] = param.view(*A_ )
_lowerCamelCase : Any = param.transpose(0, 1 ).contiguous()
_lowerCamelCase : Any = param.view(*A_ )
return param
def snake_case_ ( A_ : Optional[Any], A_ : Tuple, A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {}
# old versions did not store training args
_lowerCamelCase : Union[str, Any] = input_state_dict.get('''args''', A_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_lowerCamelCase : Optional[Any] = ds_args.padded_vocab_size
_lowerCamelCase : List[Any] = ds_args.max_position_embeddings
_lowerCamelCase : Dict = ds_args.hidden_size
_lowerCamelCase : Union[str, Any] = ds_args.num_layers
_lowerCamelCase : Any = ds_args.num_attention_heads
_lowerCamelCase : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_lowerCamelCase : Optional[int] = config.n_head
# The hidden_size per head.
_lowerCamelCase : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_lowerCamelCase : List[Any] = input_state_dict['''checkpoint_version''']
else:
_lowerCamelCase : List[str] = 0.0
# The model.
_lowerCamelCase : Tuple = input_state_dict['''model''']
# The language model.
_lowerCamelCase : Dict = model['''language_model''']
# The embeddings.
_lowerCamelCase : int = lm['''embedding''']
# The word embeddings.
_lowerCamelCase : Optional[int] = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
_lowerCamelCase : Dict = word_embeddings[: config.vocab_size, :]
_lowerCamelCase : str = word_embeddings
# The position embeddings.
_lowerCamelCase : Tuple = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_lowerCamelCase : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
_lowerCamelCase : List[Any] = pos_embeddings
# The transformer.
_lowerCamelCase : str = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
_lowerCamelCase : int = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
_lowerCamelCase : Union[str, Any] = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_lowerCamelCase : int = layer_re.match(A_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_lowerCamelCase : int = int(m.group(1 ) )
# The name of the operation.
_lowerCamelCase : str = m.group(2 )
# Is it a weight or a bias?
_lowerCamelCase : Any = m.group(3 )
# The name of the layer.
_lowerCamelCase : Tuple = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
_lowerCamelCase : List[str] = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
_lowerCamelCase : Tuple = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_lowerCamelCase : List[Any] = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.floataa ) ).view(
1, 1, A_, A_ )
_lowerCamelCase : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
_lowerCamelCase : int = torch.tensor(-1E4, dtype=torch.floataa )
_lowerCamelCase : List[str] = masked_bias
_lowerCamelCase : Tuple = fix_query_key_value_ordering(A_, A_, 3, A_, A_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_lowerCamelCase : Optional[int] = out_val.transpose(0, 1 ).contiguous()
# Store.
_lowerCamelCase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_lowerCamelCase : Tuple = fix_query_key_value_ordering(A_, A_, 3, A_, A_ )
# Store. No change of shape.
_lowerCamelCase : str = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_lowerCamelCase : Union[str, Any] = megatron_to_transformers[op_name]
_lowerCamelCase : Tuple = val.transpose(0, 1 )
# Copy the bias.
elif weight_or_bias == "bias":
_lowerCamelCase : Any = megatron_to_transformers[op_name]
_lowerCamelCase : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_lowerCamelCase : Optional[Any] = transformer['''final_layernorm.weight''']
_lowerCamelCase : Optional[Any] = transformer['''final_layernorm.bias''']
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
_lowerCamelCase : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''', action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''', type=A_, help='''Path to the checkpoint file (.zip archive or direct .pt file)''', )
parser.add_argument(
'''--config_file''', default='''''', type=A_, help='''An optional config json file describing the pre-trained model.''', )
_lowerCamelCase : Union[str, Any] = parser.parse_args()
# Extract the basename.
_lowerCamelCase : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint, '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
_lowerCamelCase : Any = torch.load(A_, map_location='''cpu''' )
else:
_lowerCamelCase : Dict = torch.load(args.path_to_checkpoint, map_location='''cpu''' )
_lowerCamelCase : Tuple = input_state_dict.get('''args''', A_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_lowerCamelCase : List[str] = '''gelu_fast'''
elif ds_args.openai_gelu:
_lowerCamelCase : List[Any] = '''gelu_new'''
else:
_lowerCamelCase : Optional[Any] = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
_lowerCamelCase : List[str] = '''gelu_new'''
# Spell out all parameters in case the defaults change.
_lowerCamelCase : str = GPTaConfig(
vocab_size=5_02_57, n_positions=10_24, n_embd=10_24, n_layer=24, n_head=16, n_inner=40_96, activation_function=A_, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, summary_type='''cls_index''', summary_use_proj=A_, summary_activation=A_, summary_proj_to_labels=A_, summary_first_dropout=0.1, scale_attn_weights=A_, use_cache=A_, bos_token_id=5_02_56, eos_token_id=5_02_56, )
else:
_lowerCamelCase : List[Any] = GPTaConfig.from_json_file(args.config_file )
_lowerCamelCase : str = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
_lowerCamelCase : List[Any] = convert_megatron_checkpoint(A_, A_, A_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(A_, A_ )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
_lowerCamelCase : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_lowerCamelCase : str = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
_lowerCamelCase : List[str] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
_lowerCamelCase : Tuple = '''gpt2'''
_lowerCamelCase : str = AutoTokenizer.from_pretrained(A_ )
_lowerCamelCase : Union[str, Any] = type(A_ ).__name__
_lowerCamelCase : Tuple = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(A_ )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(A_ )
# Store the state_dict to file.
_lowerCamelCase : Any = os.path.join(A_, '''pytorch_model.bin''' )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(A_, A_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 72 |
"""simple docstring"""
import unittest
import numpy as np
def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
if shape_a[0] != shape_b[0]:
_lowerCamelCase : Tuple = (
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(A_ )
if shape_b[1] != shape_c[1]:
_lowerCamelCase : Tuple = (
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(A_ )
_lowerCamelCase : List[str] = pseudo_inv
if a_inv is None:
try:
_lowerCamelCase : Any = np.linalg.inv(A_ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
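# Added note: for the block matrix M = [[A, B], [B.T, C]], det(M) = det(A) * det(S), where
# S = C - B.T @ A^{-1} @ B is the Schur complement returned above; the first test below checks
# exactly this identity.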
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] )
_lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] )
_lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase )
self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : int = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 72 | 1 |
"""simple docstring"""
from math import isqrt, loga
def snake_case_ ( A_ : int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = [True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, A_, A_ ):
_lowerCamelCase : str = False
return [i for i in range(2, A_ ) if is_prime[i]]
def snake_case_ ( A_ : int = 80_08_00, A_ : int = 80_08_00 ):
'''simple docstring'''
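    # Added note: this counts pairs of distinct primes (p, q), p < q, with p**q * q**p <= base**degree
    # (the setup of Project Euler problem 800). The check is done in log space,
    # q * log(p) + p * log(q) <= degree * log(base), so the huge powers are never computed.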
_lowerCamelCase : Dict = degree * loga(A_ )
_lowerCamelCase : Any = int(A_ )
_lowerCamelCase : List[Any] = calculate_prime_numbers(A_ )
_lowerCamelCase : Dict = 0
_lowerCamelCase : Dict = 0
_lowerCamelCase : str = len(A_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[int], A_ : str ):
'''simple docstring'''
_lowerCamelCase : Tuple = int(A_ )
# Initialize Result
_lowerCamelCase : Dict = []
# Traverse through all denomination
for denomination in reversed(A_ ):
# Find denominations
while int(A_ ) >= int(A_ ):
total_value -= int(A_ )
answer.append(A_ ) # Append the "answers" array
return answer
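# Worked example (added for illustration, using the default denominations from the driver below):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# The greedy pass walks the denominations from largest to smallest, taking each one as long as
# it still fits into the remaining value.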
# Driver Code
if __name__ == "__main__":
lowerCAmelCase__ = []
lowerCAmelCase__ = '''0'''
if (
        input('''Do you want to enter your denominations? (y/n): ''').strip().lower()
== "y"
):
lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
lowerCAmelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 72 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def snake_case_ ( A_ : Any ):
'''simple docstring'''
_lowerCamelCase : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ )
_lowerCamelCase : str = emb.weight.data
return lin_layer
def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model''']
remove_ignore_keys_(A_ )
_lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ )
if mbart_aa and finetuned:
_lowerCamelCase : Any = '''relu'''
_lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight''']
_lowerCamelCase : Any = MBartForConditionalGeneration(A_ )
model.model.load_state_dict(A_ )
if finetuned:
_lowerCamelCase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : int = 2_00_00_00 ):
'''simple docstring'''
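    # Added note: sieve of Eratosthenes over [0, n]; 0 marks "still prime" and 1 marks "composite"
    # (0 and 1 are flagged first), and the final loop sums the indices left at 0, i.e. the primes
    # below n (Project Euler problem 10).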
_lowerCamelCase : int = [0 for i in range(n + 1 )]
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 1
for i in range(2, int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i, n + 1, A_ ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = 0
for i in range(A_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : int = GPTaTokenizer
snake_case__ : Dict = GPTaTokenizerFast
snake_case__ : Union[str, Any] = True
snake_case__ : Dict = {"add_prefix_space": True}
snake_case__ : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
_lowerCamelCase : int = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Optional[int] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_lowerCamelCase : List[str] = {'''unk_token''': '''<unk>'''}
_lowerCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__lowerCAmelCase : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = '''lower newer'''
_lowerCamelCase : List[str] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase : Any = '''lower newer'''
_lowerCamelCase : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_lowerCamelCase : Any = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Any = tokens + [tokenizer.unk_token]
_lowerCamelCase : Any = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : List[str] = '''lower newer'''
# Testing tokenization
_lowerCamelCase : Dict = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
_lowerCamelCase : Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.encode(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing the unknown token
_lowerCamelCase : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : List[str] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int=1_5 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# Simple input
_lowerCamelCase : Union[str, Any] = '''This is a simple input'''
_lowerCamelCase : int = ['''This is a simple input 1''', '''This is a simple input 2''']
_lowerCamelCase : Any = ('''This is a simple input''', '''This is a pair''')
_lowerCamelCase : List[Any] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
_lowerCamelCase : Optional[int] = '''This is a simple input'''
_lowerCamelCase : Dict = ['''This is a simple input looooooooong''', '''This is a simple input''']
_lowerCamelCase : Any = ('''This is a simple input''', '''This is a pair''')
_lowerCamelCase : Union[str, Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
_lowerCamelCase : Tuple = tokenizer.pad_token_id
_lowerCamelCase : Optional[int] = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
_lowerCamelCase : Optional[int] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' )
_lowerCamelCase : List[str] = tokenizer(*__lowerCAmelCase , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
_lowerCamelCase : Any = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = '''$$$'''
_lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )
_lowerCamelCase : int = '''This is a simple input'''
_lowerCamelCase : Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
_lowerCamelCase : Optional[int] = tokenizer.bos_token_id
_lowerCamelCase : Union[str, Any] = tokenizer(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tokenizer(__lowerCAmelCase )
self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : str = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __lowerCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = [self.get_tokenizer(do_lower_case=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_lowerCamelCase : Any = '''Encode this.'''
_lowerCamelCase : List[Any] = '''This one too please.'''
_lowerCamelCase : Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
encoded_sequence += tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : str = tokenizer.encode_plus(
__lowerCAmelCase , __lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , )
_lowerCamelCase : str = encoded_sequence_dict['''input_ids''']
_lowerCamelCase : List[str] = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
_lowerCamelCase : Any = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__lowerCAmelCase )
]
_lowerCamelCase : Optional[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@require_tokenizers
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__lowerCAmelCase )
_lowerCamelCase : Tuple = '''A photo of a cat'''
_lowerCamelCase : str = tokenizer.encode(
__lowerCAmelCase , )
self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''test_opt''' )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained('''./test_opt''' )
_lowerCamelCase : List[str] = tokenizer.encode(
__lowerCAmelCase , )
self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=__lowerCAmelCase )
_lowerCamelCase : int = '''A photo of a cat'''
_lowerCamelCase : Any = tokenizer.encode(
__lowerCAmelCase , )
# Same as above
self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__lowerCAmelCase )
_lowerCamelCase : Dict = '''bos'''
_lowerCamelCase : int = tokenizer.get_vocab()['''bos''']
_lowerCamelCase : List[Any] = '''A photo of a cat'''
_lowerCamelCase : List[str] = tokenizer.encode(
__lowerCAmelCase , )
# We changed the bos token
self.assertEqual(__lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''./tok''' )
_lowerCamelCase : Any = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
_lowerCamelCase : int = tokenizer.encode(
__lowerCAmelCase , )
self.assertEqual(__lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 72 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def snake_case_ ( A_ : Any ):
'''simple docstring'''
_lowerCamelCase : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ )
_lowerCamelCase : str = emb.weight.data
return lin_layer
def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model''']
remove_ignore_keys_(A_ )
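    # the vocabulary size is read off the checkpoint's encoder embedding matrix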
_lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ )
if mbart_aa and finetuned:
_lowerCamelCase : Any = '''relu'''
_lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight''']
_lowerCamelCase : Any = MBartForConditionalGeneration(A_ )
model.model.load_state_dict(A_ )
if finetuned:
_lowerCamelCase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 72 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( _lowercase , _lowercase):
snake_case__ : Optional[int] = "swin"
snake_case__ : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , __lowerCAmelCase : List[Any]=2_2_4 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Optional[int]=9_6 , __lowerCAmelCase : List[str]=[2, 2, 6, 2] , __lowerCAmelCase : int=[3, 6, 1_2, 2_4] , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : List[str]=4.0 , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : int=1E-5 , __lowerCAmelCase : Optional[int]=3_2 , __lowerCAmelCase : int=None , __lowerCAmelCase : int=None , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : Dict = image_size
_lowerCamelCase : Any = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Optional[int] = embed_dim
_lowerCamelCase : Optional[int] = depths
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
_lowerCamelCase : List[Any] = num_heads
_lowerCamelCase : Union[str, Any] = window_size
_lowerCamelCase : Dict = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Tuple = attention_probs_dropout_prob
_lowerCamelCase : Dict = drop_path_rate
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : List[Any] = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) )
_lowerCamelCase : List[str] = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(__lowerCAmelCase ) + 1 )]
_lowerCamelCase , _lowerCamelCase : str = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
class __snake_case ( _lowercase):
snake_case__ : str = version.parse("1.11")
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return 1E-4
| 72 |
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = current_set.copy()
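    # Normalise each row by its leading coefficient so usable rows start with 1;
    # rows whose leading coefficient is 0 are copied through unchanged.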
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
        # If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
if len(A_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_lowerCamelCase : Dict = len(A_ ) + 1
if any(len(A_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(A_, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(A_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowerCamelCase : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
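        # move a row without any zero entries to the front so the first pivot is usable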
_lowerCamelCase : str = data_set.copy()
_lowerCamelCase : List[Any] = []
for row_index, row in enumerate(A_ ):
if 0 not in row:
_lowerCamelCase : Union[str, Any] = data_set.pop(A_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, A_ )
_lowerCamelCase : List[str] = data_set.copy()
_lowerCamelCase : int = simplify(A_ )
_lowerCamelCase : int = simplified[::-1]
_lowerCamelCase : list = []
for row in simplified:
_lowerCamelCase : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A_ ) == 0:
solutions.append(0 )
continue
_lowerCamelCase : Tuple = temp_row[1::]
_lowerCamelCase : Tuple = temp_row[::-1]
for column_index, column in enumerate(A_ ):
current_solution -= column * solutions[column_index]
solutions.append(A_ )
_lowerCamelCase : Optional[int] = []
for item in solutions:
final.append(float(round(A_, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 72 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( _lowercase):
snake_case__ : str = ["image_processor", "tokenizer"]
snake_case__ : int = "LayoutLMv2ImageProcessor"
snake_case__ : Dict = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : int , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : List[Any]=None , **__lowerCAmelCase : str ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
_lowerCamelCase : int = kwargs.pop('''feature_extractor''' )
_lowerCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __lowerCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , __lowerCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
_lowerCamelCase : Dict = self.image_processor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : List[str] = [text] # add batch dimension (as the image processor always adds a batch dimension)
_lowerCamelCase : Tuple = features['''words''']
_lowerCamelCase : List[Any] = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
# add pixel values
_lowerCamelCase : Dict = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
_lowerCamelCase : int = self.get_overflowing_images(__lowerCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
_lowerCamelCase : str = images
return encoded_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}''' )
return images_with_overflow
def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCAmelCase , )
return self.image_processor
| 72 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __snake_case ( _lowercase):
snake_case__ : List[Any] = "Speech2TextFeatureExtractor"
snake_case__ : Union[str, Any] = "Speech2TextTokenizer"
def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : str = False
def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_lowerCamelCase : str = kwargs.pop('''raw_speech''' )
else:
_lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_lowerCamelCase : List[Any] = args[0]
_lowerCamelCase : int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None:
_lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCamelCase : List[str] = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@contextmanager
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Any = self.tokenizer
yield
_lowerCamelCase : List[str] = self.feature_extractor
_lowerCamelCase : Tuple = False
| 72 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __snake_case ( _lowercase):
snake_case__ : Any = VOCAB_FILES_NAMES
snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
snake_case__ : Any = BartTokenizer
def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = '''post_processor'''
_lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
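        # rebuild the post-processor only if its saved state disagrees with the requested add_prefix_space / trim_offsets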
if tokenizer_component_instance:
_lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
_lowerCamelCase : Tuple = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase : int = tuple(state['''cls'''] )
_lowerCamelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
_lowerCamelCase : Dict = add_prefix_space
_lowerCamelCase : Optional[Any] = True
if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets:
_lowerCamelCase : Any = trim_offsets
_lowerCamelCase : str = True
if changes_to_apply:
_lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) )
_lowerCamelCase : str = component_class(**__lowerCAmelCase )
setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value
_lowerCamelCase : str = value
def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 1 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def snake_case_ ( A_ : np.ndarray, A_ : tuple[int, int], A_ : tuple[int, int], A_ : bool, ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Dict = grid.shape
_lowerCamelCase : int = [-1, 1, 0, 0]
_lowerCamelCase : List[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
_lowerCamelCase , _lowerCamelCase : str = [(0, source)], set()
_lowerCamelCase : int = np.full((rows, cols), np.inf )
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : str = np.empty((rows, cols), dtype=A_ )
_lowerCamelCase : List[str] = None
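    # Dijkstra-style search: pop the closest unvisited cell from the heap,
    # then relax its reachable neighbours and remember their predecessors.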
while queue:
((_lowerCamelCase) , (_lowerCamelCase)) : Any = heappop(A_ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
_lowerCamelCase : Optional[int] = []
while (x, y) != source:
path.append((x, y) )
_lowerCamelCase , _lowerCamelCase : List[str] = predecessors[x, y]
path.append(A_ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(A_ ) ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
_lowerCamelCase : Tuple = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(A_, (dist + 1, (nx, ny)) )
_lowerCamelCase : Dict = dist + 1
_lowerCamelCase : str = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case_ ( A_ : float = 0.1 ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = 3
_lowerCamelCase : List[str] = 3
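    # A square spiral of side length j has 2 * j - 1 numbers on its diagonals.
    # Each pass tests the three new corner values of the next layer (side j + 2);
    # the fourth corner, (j + 2) ** 2, is a perfect square and can never be prime.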
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ):
primes += is_prime(A_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 | 1 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
lowerCAmelCase__ = True
from torch.cuda.amp import autocast
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class __snake_case :
snake_case__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
snake_case__ : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
snake_case__ : Optional[bool] = field(
default=_lowercase , metadata={"help": "Whether to freeze the feature extractor layers of the model."})
snake_case__ : Optional[bool] = field(
default=_lowercase , metadata={"help": "Whether to log verbose messages or not."} , )
snake_case__ : Optional[float] = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."})
snake_case__ : Optional[float] = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."})
snake_case__ : Optional[float] = field(
default=0.999_995 , metadata={"help": "Decay of gumbel temperature during training."})
def snake_case_ ( A_ : ModelArguments, A_ : TrainingArguments ):
'''simple docstring'''
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
_lowerCamelCase : List[str] = logging.WARNING
if model_args.verbose_logging:
_lowerCamelCase : Optional[Any] = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCamelCase : int = logging.INFO
logger.setLevel(A_ )
@dataclass
class __snake_case :
snake_case__ : str = field(
default=_lowercase , metadata={"help": "The name of the dataset to use (via the datasets library)."})
snake_case__ : Optional[str] = field(
default=_lowercase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
snake_case__ : Optional[str] = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
snake_case__ : Optional[str] = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
snake_case__ : Optional[str] = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
snake_case__ : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached preprocessed datasets or not."})
snake_case__ : Optional[int] = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
snake_case__ : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
snake_case__ : Optional[float] = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})
@dataclass
class __snake_case :
snake_case__ : WavaVecaForPreTraining
snake_case__ : WavaVecaFeatureExtractor
snake_case__ : Union[bool, str] = "longest"
snake_case__ : Optional[int] = None
snake_case__ : Optional[int] = None
def __call__( self : int , __lowerCAmelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.feature_extractor.pad(
__lowerCAmelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
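        # the number of mask indices is the sequence length after the feature-encoder convolutions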
_lowerCamelCase : int = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
_lowerCamelCase : str = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCamelCase : Any = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
_lowerCamelCase : Any = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
            # these two operations make sure that all values
            # before the output-length indices are attended to
_lowerCamelCase : Union[str, Any] = 1
_lowerCamelCase : List[Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
_lowerCamelCase : Optional[int] = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__lowerCAmelCase , min_masks=2 , )
return batch
class __snake_case ( _lowercase):
def __init__( self : int , *__lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : str=0 , __lowerCAmelCase : List[Any]=1.0 , **__lowerCAmelCase : List[Any] ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = 0
_lowerCamelCase : List[str] = max_gumbel_temp
_lowerCamelCase : Tuple = min_gumbel_temp
_lowerCamelCase : Union[str, Any] = gumbel_temp_decay
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] ):
"""simple docstring"""
model.train()
_lowerCamelCase : Dict = self._prepare_inputs(__lowerCAmelCase )
if self.use_amp:
with autocast():
_lowerCamelCase : Optional[Any] = self.compute_loss(__lowerCAmelCase , __lowerCAmelCase )
else:
_lowerCamelCase : Dict = self.compute_loss(__lowerCAmelCase , __lowerCAmelCase )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCamelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCamelCase : List[str] = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
_lowerCamelCase : Optional[int] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__lowerCAmelCase ).backward()
elif self.use_apex:
with amp.scale_loss(__lowerCAmelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__lowerCAmelCase )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_args_into_dataclasses()
configure_logger(A_, A_ )
# Downloading and loading a dataset from the hub.
_lowerCamelCase : List[Any] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_lowerCamelCase : Dict = DatasetDict()
_lowerCamelCase : Union[str, Any] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''', cache_dir=model_args.cache_dir, )
_lowerCamelCase : str = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''', cache_dir=model_args.cache_dir, )
else:
# make sure only "validation" and "train" keys remain"
_lowerCamelCase : int = DatasetDict()
_lowerCamelCase : Any = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split='''validation''', cache_dir=model_args.cache_dir, )
_lowerCamelCase : Union[str, Any] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F'''{data_args.train_split_name}''', cache_dir=model_args.cache_dir, )
# only normalized-inputs-training is supported
_lowerCamelCase : int = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=A_ )
def prepare_dataset(A_ : Optional[int] ):
# check that all files have the correct sampling rate
_lowerCamelCase , _lowerCamelCase : Dict = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCamelCase : Tuple = datasets.map(
A_, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
_lowerCamelCase : Optional[int] = vectorized_datasets.filter(
lambda A_ : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(A_ : Optional[int] ):
return feature_extractor(batch['''speech'''], sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCamelCase : str = vectorized_datasets.map(
A_, batched=A_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets['''train'''].column_names, )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCamelCase : List[str] = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
_lowerCamelCase : List[Any] = WavaVecaForPreTraining(A_ )
_lowerCamelCase : Union[str, Any] = DataCollatorForWavaVecaPretraining(model=A_, feature_extractor=A_ )
_lowerCamelCase : Optional[Any] = WavaVecaPreTrainer(
model=A_, data_collator=A_, args=A_, train_dataset=vectorized_datasets['''train'''], eval_dataset=vectorized_datasets['''validation'''], tokenizer=A_, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
trainer.train()
if __name__ == "__main__":
main()
| 72 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : int = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase )
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
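        # run generation in a background thread so this thread can consume text from the streamer as it is produced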
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
_lowerCamelCase : int = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
_lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
_lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = -1
_lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n"
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
        # The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
| 72 | 1 |
"""simple docstring"""
import os
# Precompute a list of the first 100 triangular numbers
lowerCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = os.path.dirname(os.path.realpath(A_ ) )
_lowerCamelCase : Optional[Any] = os.path.join(A_, '''words.txt''' )
_lowerCamelCase : Dict = ''''''
with open(A_ ) as f:
_lowerCamelCase : Any = f.readline()
_lowerCamelCase : List[str] = [word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
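    # ord(letter) - 64 maps 'A'..'Z' to 1..26, so each inner sum is a word's letter-value score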
_lowerCamelCase : Union[str, Any] = [
word
for word in [sum(ord(A_ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(A_ )
if __name__ == "__main__":
print(solution())
| 72 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
snake_case__ : int = "retribert"
def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : int = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = share_encoders
_lowerCamelCase : Optional[Any] = projection_dim
| 72 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_lowercase)
class __snake_case ( _lowercase):
snake_case__ : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True})
snake_case__ : ClassVar[Features] = Features({"image": Image()})
snake_case__ : ClassVar[Features] = Features({"labels": ClassLabel})
snake_case__ : str = "image"
snake_case__ : str = "labels"
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Dict ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __lowerCAmelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
_lowerCamelCase : List[Any] = copy.deepcopy(self )
_lowerCamelCase : str = self.label_schema.copy()
_lowerCamelCase : Union[str, Any] = features[self.label_column]
_lowerCamelCase : List[str] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 72 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
            # Removed: 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72 | 1 |