Dataset columns: `code` (string, lengths 86–54.5k), `code_codestyle` (int64, 0–371), `style_context` (string, lengths 87–49.2k), `style_context_codestyle` (int64, 0–349), `label` (int64, 0–1).

| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
"""simple docstring"""
def _lowerCamelCase( ):
__a = 0
for i in range(1 , 1_0_0_1 ):
total += i**i
return str(a )[-1_0:]
if __name__ == "__main__":
print(solution())
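# Illustrative check (added, not part of the original snippet): for the smaller range
# 1..10 the same brute-force sum gives 1**1 + 2**2 + ... + 10**10 = 10405071317, so the
# last ten digits would be "0405071317"; solution() applies the identical loop up to 1000.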
| 261
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 261
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 349
|
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
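# Illustrative usage (added, not in the original file):
#     >>> prime_sieve(10)
#     [2, 3, 5, 7]
# Striking out multiples starting from start*start is safe because every smaller
# multiple was already removed by a smaller prime.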
| 349
| 1
|
"""simple docstring"""
def A_ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase, _lowercase ):
raise TypeError("""Input value must be an 'int' type""" )
snake_case_ :Any = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : str = StableDiffusionSAGPipeline
_A : Optional[Any] = TEXT_TO_IMAGE_PARAMS
_A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : List[str] = False
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
torch.manual_seed(0 )
snake_case_ :Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
snake_case_ :Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
snake_case_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case_ :Tuple = CLIPTextModel(snake_case )
snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ :Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str:
if str(snake_case ).startswith("""mps""" ):
snake_case_ :Tuple = torch.manual_seed(snake_case )
else:
snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case )
snake_case_ :Any = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self: Optional[int] ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: int ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self: int ) -> List[str]:
snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
snake_case_ :int = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Union[str, Any] = """."""
snake_case_ :str = torch.manual_seed(0 )
snake_case_ :str = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ :List[Any] = output.images
snake_case_ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase_ ( self: Dict ) -> str:
snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ :Optional[int] = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Tuple = """."""
snake_case_ :Union[str, Any] = torch.manual_seed(0 )
snake_case_ :Tuple = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ :Optional[int] = output.images
snake_case_ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ :int = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Tuple = """."""
snake_case_ :Optional[int] = torch.manual_seed(0 )
snake_case_ :List[str] = sag_pipe(
[prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
snake_case_ :Optional[Any] = output.images
assert image.shape == (1, 512, 768, 3)
| 66
| 1
|
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element by scanning all
    subsequent elements (brute force, O(n^2))."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow(), but using enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element using a monotonic stack, in O(n)."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
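# Worked example (added for illustration, not in the original script): for
# arr = [2, 7, 3, 5, 1] all three functions return [7, -1, 5, -1, -1]. In
# next_greatest_element() each value is pushed and popped at most once, so it runs
# in O(n), versus O(n^2) for the two nested-loop variants.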
| 162
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(outputs, labels):
    outputs = np.argmax(outputs, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
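# Shape sketch (added for illustration; names are only assumptions): for a dataset of
# n_batch examples and a padded length of input_len tokens, pre_process_datasets()
# yields one tuple of four tensors per dataset, shaped (n_batch, 2, input_len) for
# input_ids and lm_labels, (n_batch, 2) for mc_token_ids, and (n_batch,) for
# mc_labels - one row per (story + continuation) alternative.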
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
| 162
| 1
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load

try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Read Visual Genome class and attribute names from the bundled text files."""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    """Load a pickled checkpoint and convert numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def _A ( snake_case , snake_case="," ) -> Tuple:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
with open(_lowerCamelCase ) as f:
_lowercase : Optional[int] = eval(f.read() )
else:
_lowercase : Union[str, Any] = requests.get(_lowerCamelCase )
try:
_lowercase : str = requests.json()
except Exception:
_lowercase : str = req.content.decode()
assert data is not None, "could not connect"
try:
_lowercase : Optional[Any] = eval(_lowerCamelCase )
except Exception:
_lowercase : int = data.split("\n" )
req.close()
return data
def _A ( snake_case ) -> Any:
_lowercase : str = requests.get(_lowerCamelCase )
_lowercase : Any = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _A ( snake_case ) -> Any:
_lowercase : List[str] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_lowerCamelCase )
with open(_lowerCamelCase , "rb" ) as stream:
_lowercase : Any = pkl.load(_lowerCamelCase )
_lowercase : Optional[Any] = weights.pop("model" )
_lowercase : Any = {}
for k, v in model.items():
_lowercase : Union[str, Any] = torch.from_numpy(_lowerCamelCase )
if "running_var" in k:
_lowercase : str = torch.tensor([0] )
_lowercase : str = k.replace("running_var" , "num_batches_tracked" )
_lowercase : List[str] = zero
return new
def _A ( ) -> Optional[int]:
print(F'''{os.path.abspath(os.path.join(_lowerCamelCase , os.pardir ) )}/demo.ipynb''' )
def _A ( snake_case , snake_case="RGB" ) -> int:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
_lowercase : List[str] = cva.imread(_lowerCamelCase )
else:
_lowercase : List[str] = get_image_from_url(_lowerCamelCase )
assert img is not None, F'''could not connect to: {im}'''
_lowercase : Optional[Any] = cva.cvtColor(_lowerCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_lowercase : Optional[Any] = img[:, :, ::-1]
return img
def _A ( snake_case , snake_case=1 ) -> Optional[int]:
return (images[i : i + batch] for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ))
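# Minimal usage sketch (added, not from the original file): chunk() yields consecutive
# batches, e.g. list(chunk([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]].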
| 250
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase_ ( a):
def snake_case__ ( self, __a):
'''simple docstring'''
return 0.0
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_lowerCAmelCase : Optional[int] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = 512
_lowerCAmelCase : Union[str, Any] = [1] + [0] * (size - 1)
_lowerCAmelCase : Optional[Any] = [filter_type.process(_lowerCamelCase ) for item in inputs]
_lowerCAmelCase : int = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCAmelCase : str = np.abs(np.fft.fft(_lowerCamelCase ) )
_lowerCAmelCase : Union[str, Any] = 20 * np.logaa(_lowerCamelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_lowerCAmelCase : List[Any] = get_bounds(_lowerCamelCase , _lowerCamelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(_lowerCamelCase )
plt.show()
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 512
_lowerCAmelCase : Optional[Any] = [1] + [0] * (size - 1)
_lowerCAmelCase : str = [filter_type.process(_lowerCamelCase ) for item in inputs]
_lowerCAmelCase : Optional[Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCAmelCase : Optional[Any] = np.angle(np.fft.fft(_lowerCamelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(_lowerCamelCase , -2 * pi ) )
plt.show()
| 36
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
'''simple docstring'''
@staticmethod
def _lowerCAmelCase ( *_a , **_a ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
lowerCamelCase = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = object_detector(examples[0] , threshold=0.0 )
lowerCamelCase = len(_a )
self.assertGreater(_a , 0 )
self.assertEqual(
_a , [
{
"""score""": ANY(_a ),
"""label""": ANY(_a ),
"""box""": {"""xmin""": ANY(_a ), """ymin""": ANY(_a ), """xmax""": ANY(_a ), """ymax""": ANY(_a )},
}
for i in range(_a )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
@require_torch
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
lowerCamelCase = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
lowerCamelCase = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = pipeline("""zero-shot-object-detection""" )
lowerCamelCase = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
lowerCamelCase = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
@require_torch
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = 0.2
lowerCamelCase = pipeline("""zero-shot-object-detection""" )
lowerCamelCase = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=_a , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = 2
lowerCamelCase = pipeline("""zero-shot-object-detection""" )
lowerCamelCase = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=_a , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 168
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = (KDPMaDiscreteScheduler,)
__UpperCamelCase = 10
def _lowerCAmelCase ( self , **_a ):
"""simple docstring"""
lowerCamelCase = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_a )
return config
def _lowerCAmelCase ( self ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
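# Minimal sketch (not part of the test suite) of swapping this scheduler into a
# diffusers pipeline; the checkpoint id is a hypothetical choice:
#
#   from diffusers import DiffusionPipeline, KDPM2DiscreteScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)
#   image = pipe("an astronaut riding a horse").images[0]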
| 168
| 1
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    """
    Cleans a section of the documentation table of content by removing duplicates and sorting entries
    alphabetically by title, keeping the "Overview" page first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
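# A small illustration (hypothetical data) of what clean_doc_toc does: duplicates are
# collapsed, entries are sorted by title, and the "Overview" page is kept first.
#
#   toc = [
#       {"local": "overview", "title": "Overview"},
#       {"local": "schedulers/ddim", "title": "DDIM"},
#       {"local": "schedulers/ddim", "title": "DDIM"},
#       {"local": "schedulers/ancestral", "title": "Ancestral"},
#   ]
#   clean_doc_toc(toc)
#   # -> [{"local": "overview", "title": "Overview"},
#   #     {"local": "schedulers/ancestral", "title": "Ancestral"},
#   #     {"local": "schedulers/ddim", "title": "DDIM"}]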
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
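# Typical invocations (the script location inside the repository is an assumption):
#   python utils/check_doc_toc.py                      # raise if the toc is not properly sorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite docs/source/en/_toctree.yml in place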
| 258
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780,
            705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119,
            57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # fmt: on
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case : Tuple = {"""input_ids""": [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case,  # dict literal defined above under `fmt: off`
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
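# Quick sanity sketch of the slow/fast pair exercised above (downloads the real
# xlm-roberta-base checkpoint; expected ids come from test_tokenization_base_easy_symbols):
#
#   from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast
#
#   slow_tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   fast_tok = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
#   assert slow_tok.encode("Hello World!") == fast_tok.encode("Hello World!") == [0, 35378, 6661, 38, 2]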
| 124
| 0
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
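# Tiny worked illustration (hypothetical data) of the precision@k computation above,
# where hypothesis and reference provenance titles are tab-separated:
#
#   hypo      = "Aaron\tMoses\tMiriam"   # model's ranked provenance titles
#   reference = "Aaron\tElisheba"        # gold titles
#   k = 2
#   len({"Aaron", "Moses"} & {"Aaron", "Elisheba"}) / k  ->  0.5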
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type"
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating."
    )
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retrieved while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
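# Example end-to-end invocation (paths are hypothetical; the model id is a public RAG checkpoint):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5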
| 16
|
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
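# Minimal usage sketch of the pipeline under test (same tiny checkpoint as the
# small-model tests above; scores and tokens differ per checkpoint):
#
#   from transformers import pipeline
#
#   unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#   unmasker("My name is <mask>")
#   # -> list of dicts with "sequence", "score", "token" and "token_str" keys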
| 16
| 1
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, and any custom code would make the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
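# Illustrative doctest using the custom flag registered above: with +IGNORE_RESULT the
# expected output is not compared at all, so this example passes even though 1 + 1 != 3.
#
#   def add(a, b):
#       """
#       >>> add(1, 1)  # doctest: +IGNORE_RESULT
#       3
#       """
#       return a + b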
| 349
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'squeezebert/squeezebert-uncased': 512,
    'squeezebert/squeezebert-mnli': 512,
    'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
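# Minimal usage sketch (checkpoint name taken from the pretrained maps above):
#
#   from transformers import SqueezeBertTokenizerFast
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   encoded = tokenizer("Hello world!", "A second segment.")
#   encoded["input_ids"]       # [CLS] ... [SEP] ... [SEP]
#   encoded["token_type_ids"]  # 0s for the first segment, 1s for the second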
| 349
| 1
|
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
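# Illustrative wiring (an assumption modeled on the transformers question-answering
# example; `post_processing_function` and the dataset names are hypothetical):
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()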
| 228
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    # A constant multiplier of 1 keeps the optimizer's initial learning rate.
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    # step_rules looks like "1:10,0.1:20,0.01:30,0.005": multiplier 1 until step 10,
    # 0.1 until step 20, 0.01 until step 30, then 0.005 for the remaining steps.
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
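# Short usage sketch (illustrative; assumes a torch optimizer is already built):
#
#     from torch.optim import AdamW
#
#     optimizer = AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler(
#         "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#     )
#     # call lr_scheduler.step() once per optimizer step during training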
| 228
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
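# Note: the `else` branch above pairs with `if TYPE_CHECKING:`. At runtime the module
# object is swapped for a `_LazyModule`, so an import such as
# `from transformers.models.funnel import FunnelModel` only loads the heavy torch code
# on first attribute access.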
| 162
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
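# Quick sanity check (illustrative):
#
#     config = RoCBertConfig()                    # BERT-base-sized defaults
#     assert config.hidden_size == 768
#     small = RoCBertConfig(num_hidden_layers=4)  # any field can be overridden by keyword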
| 162
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
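# Hedged usage sketch (the class name above is reconstructed and the checkpoint name
# is an assumption):
#
#     from PIL import Image
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                        return_tensors="pt")
#     # -> input_ids / attention_mask from the tokenizer plus pixel_values from the
#     #    image processor, ready for the corresponding model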
| 371
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(nth_term):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series
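# Worked example: p_series(5, 2) -> ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"],
# i.e. the first five terms of the series sum(1 / n^2).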
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 89
| 0
|
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 168
|
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by (squared) Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull component i of winning vector j toward sample[i] by learning rate alpha."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
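# Worked example: for sample [0, 0, 0, 1] and the initial weights in main() below,
# d0 = 0.2^2 + 0.6^2 + 0.5^2 + 0.1^2 = 0.66 and d1 = 0.8^2 + 0.4^2 + 0.7^2 + 0.7^2 = 1.78,
# so `d0 > d1` is False and get_winner returns 1.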
def main() -> None:
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 168
| 1
|
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    """Zero-based index of the lowest set bit; defined as 0 for input 0."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
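# Worked example: 36 = 0b100100, so 36 & -36 isolates the lowest set bit (4) and
# int(log2(4)) = 2, the zero-based index of that bit.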
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into (features, targets)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Fit an XGBoost classifier on a 75/25 split of the iris dataset.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 279
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Score the prediction against every reference and keep the best match.
    return max(metric_fn(prediction, gt) for gt in ground_truths)
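# Example: with metric_fn=exact_match_score, prediction="Paris" and
# ground_truths=["paris", "Lyon"], the (case-normalizing) exact-match comparison
# scores 1 against the first reference, so the max over references is 1.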
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type"
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating."
    )
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retrieved while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
lowerCAmelCase_ = get_args()
main(args)
| 16
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ResNet has no attention, no token inputs and no resizable embeddings, so several common tests are disabled."""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 16
| 1
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    # Decorator factory: wrap a callable so it runs either eagerly or as a
    # (optionally XLA-compiled) tf.function, depending on the benchmark args.
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
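# e.g. random_input_ids(2, 8, 30522) returns a (2, 8) int32 tensor of token ids drawn
# uniformly from [0, vocab_size); it serves as dummy input for the timing closures below.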
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on a separate process before measuring
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 351
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
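
# --- Illustrative sanity check (not part of the original script) ------------
# The helpers above map ParlAI parameter names onto the Hugging Face
# Blenderbot layout; the keys below are hypothetical examples showing the
# pattern substitutions at work.
def _demo_rename_keys():
    assert (
        rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
        == "encoder.layers.0.self_attn.q_proj.weight"
    )
    assert rename_state_dict_key("decoder.layers.1.norm3.bias") == "decoder.layers.1.final_layer_norm.bias"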
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
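
# Example invocation (illustrative; the script filename and all paths are
# placeholders):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json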
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
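
# --- Illustrative sketch (not part of the original script) ------------------
# set_recursively walks a dotted key with getattr and assigns into the
# matching parameter's .data; the tiny module below is a made-up example.
def _demo_set_recursively():
    class _Tiny(nn.Module):
        def __init__(self):
            super().__init__()
            self.proj = nn.Linear(2, 2, bias=False)

    tiny = _Tiny()
    new_weight = torch.zeros(2, 2)
    set_recursively(tiny, "proj", new_weight, "demo.proj.weight", "weight")
    assert torch.equal(tiny.proj.weight.data, new_weight)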
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
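
# --- Illustrative check (not part of the original script) -------------------
# make_linear_from_emb ties a Linear layer's weight tensor to an embedding
# matrix, the usual way an LM head is built from input embeddings; after the
# assignment the layer maps emb_size -> vocab_size and shares storage with
# the embedding.
def _demo_make_linear_from_emb():
    emb = nn.Embedding(10, 4)
    lm_head = make_linear_from_emb(emb)
    assert lm_head.weight.shape == (10, 4)
    assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # shared storage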
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
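
# --- Small usage sketch (illustrative, not part of the original utility) ----
# to_json makes class objects printable by name, recursing through lists and
# dicts, which is how the mappings built above are rendered.
def _demo_to_json():
    class Foo:
        pass

    assert to_json(Foo) == "Foo"
    assert to_json([Foo, "bar"]) == ["Foo", "bar"]
    assert to_json({Foo: [Foo]}) == {"Foo": ["Foo"]}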
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """
    A single set of features of data.
    Property names are the same names as the corresponding inputs to a model.
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
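
# --- Toy sketch (illustrative; uses a fake word->sub-token table instead of a
# real tokenizer) -------------------------------------------------------------
# The labeling rule above gives a word's label to its first sub-token and the
# ignore index (-100) to the remaining sub-tokens.
def _demo_first_subtoken_labels():
    pad_token_label_id = -100
    label_map = {"O": 0, "B-LOC": 1}
    words_to_subtokens = {"Paris": ["Par", "##is"], "is": ["is"]}

    tokens, label_ids = [], []
    for word, label in [("Paris", "B-LOC"), ("is", "O")]:
        word_tokens = words_to_subtokens[word]
        tokens.extend(word_tokens)
        label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

    assert tokens == ["Par", "##is", "is"]
    assert label_ids == [1, -100, 0]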
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        inputs = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**inputs)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        inputs = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**inputs)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__=1 , __magic_name__="[SEP]" , __magic_name__=False , __magic_name__=False , __magic_name__=0 , __magic_name__=0 , __magic_name__=-100 , __magic_name__=0 , __magic_name__=True , ) -> List[InputFeatures]:
'''simple docstring'''
snake_case_ : Optional[int] = {label: i for i, label in enumerate(__magic_name__ )}
snake_case_ : Dict = []
for ex_index, example in enumerate(__magic_name__ ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' , __magic_name__ , len(__magic_name__ ) )
snake_case_ : List[str] = []
snake_case_ : List[str] = []
for word, label in zip(example.words , example.labels ):
snake_case_ : Optional[Any] = tokenizer.tokenize(__magic_name__ )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(__magic_name__ ) > 0:
tokens.extend(__magic_name__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__magic_name__ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
snake_case_ : Union[str, Any] = tokenizer.num_special_tokens_to_add()
if len(__magic_name__ ) > max_seq_length - special_tokens_count:
snake_case_ : str = tokens[: (max_seq_length - special_tokens_count)]
snake_case_ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
snake_case_ : Union[str, Any] = [sequence_a_segment_id] * len(__magic_name__ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
snake_case_ : Union[str, Any] = [cls_token] + tokens
snake_case_ : List[Any] = [pad_token_label_id] + label_ids
snake_case_ : Optional[Any] = [cls_token_segment_id] + segment_ids
snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
snake_case_ : int = [1 if mask_padding_with_zero else 0] * len(__magic_name__ )
# Zero-pad up to the sequence length.
snake_case_ : Optional[int] = max_seq_length - len(__magic_name__ )
if pad_on_left:
snake_case_ : Optional[Any] = ([pad_token] * padding_length) + input_ids
snake_case_ : Optional[int] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
snake_case_ : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids
snake_case_ : Dict = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(__magic_name__ ) == max_seq_length
assert len(__magic_name__ ) == max_seq_length
assert len(__magic_name__ ) == max_seq_length
assert len(__magic_name__ ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(__magic_name__ ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(__magic_name__ ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(__magic_name__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
snake_case_ : int = None
features.append(
InputFeatures(
input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , label_ids=__magic_name__ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset ):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        def __init__(self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length = None , overwrite_cache=False , mode = Split.train , ) -> None:
            '''simple docstring'''
            cached_features_file = os.path.join(
                data_dir , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '''.lock'''
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(F'''Loading features from cached file {cached_features_file}''' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(F'''Creating features from dataset file at {data_dir}''' )
                    examples = token_classification_task.read_examples_from_file(data_dir , mode )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(F'''Saving features into cached file {cached_features_file}''' )
                    torch.save(self.features , cached_features_file )
def __len__(self ) -> Optional[Any]:
'''simple docstring'''
return len(self.features )
        def __getitem__(self , i ) -> InputFeatures:
            '''simple docstring'''
            return self.features[i]
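    # A minimal usage sketch (the data path, `NER` task class and `labels` list
    # are assumptions for illustration, not defined in this file):
    #
    #   dataset = TokenClassificationDataset(
    #       token_classification_task=NER(),
    #       data_dir="./data",
    #       tokenizer=tokenizer,
    #       labels=labels,
    #       model_type="bert",
    #       max_seq_length=128,
    #       mode=Split.train,
    #   )
    #   print(len(dataset), dataset[0].input_ids[:10])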
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        def __init__(self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length = None , overwrite_cache=False , mode = Split.train , ) -> None:
            '''simple docstring'''
            examples = token_classification_task.read_examples_from_file(data_dir , mode )
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self ):
            '''simple docstring'''
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset
def __len__(self ) -> str:
'''simple docstring'''
return len(self.features )
        def __getitem__(self , i ) -> InputFeatures:
            '''simple docstring'''
            return self.features[i]
| 279
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/rembert''': 256,
}
class RemBertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self ,vocab_file ,do_lower_case=False ,remove_space=True ,keep_accents=True ,bos_token="[CLS]" ,eos_token="[SEP]" ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,**kwargs ,) -> None:
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,**kwargs ,)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ) -> dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self ,d ) -> None:
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self ,text ,sample=False ) -> List[str]:
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self ,token ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self ,index ) -> str:
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self ,tokens ) -> str:
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
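    # Illustration: with token_ids_a=[5, 6] and token_ids_b=[7] this returns
    # [cls_id, 5, 6, sep_id, 7, sep_id], i.e. the [CLS] A [SEP] pattern with a
    # second segment terminated by its own [SEP].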
    def get_special_tokens_mask( self ,token_ids_a ,token_ids_b = None ,already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
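    # e.g. token_ids_a=[5, 6] and token_ids_b=[7] yields [0, 0, 0, 0, 1, 1]:
    # zeros over [CLS] A [SEP], ones over B [SEP].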
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        return (out_vocab_file,)
| 142
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    """simple docstring"""
    xa = x_start
    fxa = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        area += abs(fxa_next + fxa ) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
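# Quick sanity check: the trapezoidal rule is exact for linear functions,
# e.g. trapezoidal_area(lambda x: x, 0.0, 1.0, 1) == 0.5.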
if __name__ == "__main__":
    def f(x: float ) -> float:
        """simple docstring"""
        return x**3 + x**2

    print('''f(x) = x^3 + x^2''')
    print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
    while i <= 100000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
| 142
| 1
|
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    '''simple docstring'''
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components(reversed_graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    '''simple docstring'''
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components(graph: dict[int, list[int]] ) -> list[list[int]]:
    '''simple docstring'''
    visited = len(graph ) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
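# For example, strongly_connected_components(test_graph_1) yields the components
# {0, 1, 2} (the cycle 0 -> 2 -> 1 -> 0), {3} and {4}, in some order.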
| 22
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    """simple docstring"""
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self , unet: UNetaDModel , scheduler: ScoreSdeVeScheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 2000 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
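# Hedged usage sketch (the checkpoint id below is an assumption for
# illustration, not part of this file):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]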
| 23
| 0
|
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int ) -> int:
    if num < 0:
        raise ValueError("""Number should not be negative.""" )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
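# e.g. factorial(5) == 120; @lru_cache memoizes every intermediate result,
# so a later call such as factorial(4) is answered from the cache.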
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000 ) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
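# Rationale: an a-by-b grid contains T(a) * T(b) axis-aligned sub-rectangles,
# where T(n) = n * (n + 1) / 2 is the n-th triangle number, so the loop above
# searches for a product of two triangle numbers closest to the target count.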
if __name__ == "__main__":
print(f'''{solution() = }''')
| 139
|
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float , tangential_force: float , area: float , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
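# Illustrative check: exactly one of the three arguments must be zero, and the
# function solves for it, e.g. shear_stress(stress=25, tangential_force=100, area=0)
# returns ('area', 4.0), since area = tangential_force / stress.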
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139
| 1
|
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def get_results(output_dir , split="eval" ):
    """simple docstring"""
    path = os.path.join(output_dir , F'{split}_results.json' )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            return json.load(f )
    raise ValueError(F'can\'t find {path}' )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests( TestCasePlus ):
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : List[Any] = self.get_auto_remove_tmp_dir()
snake_case : Optional[int] = F'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
run_flax_glue.main()
snake_case : Optional[Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Dict = self.get_auto_remove_tmp_dir()
snake_case : Dict = F'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
run_clm_flax.main()
snake_case : Optional[Any] = get_results(UpperCamelCase__ )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = self.get_auto_remove_tmp_dir()
snake_case : Optional[Any] = F'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
run_summarization_flax.main()
snake_case : int = get_results(UpperCamelCase__ , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = self.get_auto_remove_tmp_dir()
snake_case : Tuple = F'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
run_mlm_flax.main()
snake_case : Dict = get_results(UpperCamelCase__ )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = self.get_auto_remove_tmp_dir()
snake_case : Union[str, Any] = F'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
run_ta_mlm_flax.main()
snake_case : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = 7 if get_gpu_count() > 1 else 2
snake_case : List[str] = self.get_auto_remove_tmp_dir()
snake_case : Optional[int] = F'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
run_flax_ner.main()
snake_case : Tuple = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : str = self.get_auto_remove_tmp_dir()
snake_case : int = F'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
run_qa.main()
snake_case : Any = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 112
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case : List[str] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case : Union[str, Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : int = "tester"
snake_case : List[str] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : int = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
snake_case : List[Any] = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case : str = tokenizer.encode([special_token] , add_special_tokens=UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , 1 )
snake_case : List[Any] = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
snake_case ,snake_case : Union[str, Any] = self.get_input_output_texts(UpperCamelCase__ )
snake_case : Dict = tokenizer.tokenize(UpperCamelCase__ )
snake_case : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
snake_case : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertNotEqual(len(UpperCamelCase__ ) , 0 )
snake_case : Tuple = tokenizer.decode(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(text_a.replace(" " , "" ) , UpperCamelCase__ )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
| 112
| 1
|
"""simple docstring"""
def miller_rabin(n: int , allow_probable: bool = False ):
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            '''Warning: upper bound of deterministic test is exceeded. '''
            '''Pass allow_probable=True to allow probabilistic test. '''
            '''A return value of True indicates a probable prime.''' )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
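# Worked example of the d * 2**s decomposition above: for n = 221,
# n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2; since 221 = 13 * 17,
# miller_rabin(221) correctly returns False.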
def test_miller_rabin():
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 106
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=56 , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Any=99 , lowercase_ : int=32 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=7 , lowercase_ : Dict="gelu_new" , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : Optional[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : int=4 , lowercase_ : Tuple="block_sparse" , lowercase_ : Dict=True , lowercase_ : Optional[int]=False , lowercase_ : Dict=2 , lowercase_ : int=3 , ) -> Union[str, Any]:
lowercase__ : Dict = parent
lowercase__ : Dict = batch_size
lowercase__ : Tuple = seq_length
lowercase__ : Dict = is_training
lowercase__ : Dict = use_attention_mask
lowercase__ : Tuple = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = vocab_size
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : str = intermediate_size
lowercase__ : int = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : Optional[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = type_vocab_size
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : List[str] = num_choices
lowercase__ : str = rescale_embeddings
lowercase__ : Optional[Any] = attention_type
lowercase__ : Optional[int] = use_bias
lowercase__ : Optional[int] = block_size
lowercase__ : str = num_random_blocks
def __UpperCamelCase ( self : str ) -> Optional[Any]:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[int] = None
if self.use_token_type_ids:
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : int = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Union[str, Any] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin ,unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ) -> None:
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self : List[str] ) -> Any:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self : Tuple ) -> str:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
super().test_hidden_states_output()
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
for model_class_name in self.all_model_classes:
lowercase__ : Optional[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(lowercase_ )
def __UpperCamelCase ( self : int ) -> Optional[int]:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCamelCase ( self : str ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = model_class(lowercase_ )
@jax.jit
def model_jitted(lowercase_ : Tuple , lowercase_ : int=None , **lowercase_ : Dict ):
return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ )
with self.subTest("JIT Enabled" ):
lowercase__ : int = model_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowercase__ : Any = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any]=1E-5 , lowercase_ : Any="outputs" , lowercase_ : List[str]=None ) -> List[Any]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
| 87
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 25),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0_001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf' ),
            'variance_type': None,
        }
        config.update(**kwargs )
        return config
def A_ ( self : int , UpperCAmelCase_ : int=0 , **UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE__ = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__ = kwargs.pop('num_inference_steps' , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.dummy_sample
SCREAMING_SNAKE_CASE__ = 0.1 * sample
SCREAMING_SNAKE_CASE__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = scheduler_class.from_pretrained(UpperCAmelCase_ )
new_scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = sample, sample
for t in range(UpperCAmelCase_ , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
SCREAMING_SNAKE_CASE__ = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A_ ( self : List[Any] ):
pass
def A_ ( self : Tuple , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE__ = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__ = kwargs.pop('num_inference_steps' , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.dummy_sample
SCREAMING_SNAKE_CASE__ = 0.1 * sample
SCREAMING_SNAKE_CASE__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = scheduler_class.from_pretrained(UpperCAmelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
SCREAMING_SNAKE_CASE__ = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE__ = 50
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase_ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.2_574 ) < 1e-3
def A_ ( self : int ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )
def A_ ( self : Optional[int] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
SCREAMING_SNAKE_CASE__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE__ = self.full_loop(scheduler=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
SCREAMING_SNAKE_CASE__ = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__ = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__ = self.full_loop(scheduler=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def A_ ( self : Optional[Any] ):
self.check_over_configs(thresholding=UpperCAmelCase_ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase_ , solver_type=UpperCAmelCase_ , )
def A_ ( self : List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_ )
def A_ ( self : Dict ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase_ , solver_type=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , algorithm_type=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = self.full_loop(
solver_order=UpperCAmelCase_ , solver_type=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , algorithm_type=UpperCAmelCase_ , )
assert not torch.isnan(UpperCAmelCase_ ).any(), "Samples have nan numbers"
def A_ ( self : Any ):
self.check_over_configs(lower_order_final=UpperCAmelCase_ )
self.check_over_configs(lower_order_final=UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def A_ ( self : List[Any] ):
self.check_over_configs(variance_type=UpperCAmelCase_ )
self.check_over_configs(variance_type='learned_range' )
def A_ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=UpperCAmelCase_ , time_step=0 )
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = self.full_loop()
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = self.full_loop(use_karras_sigmas=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.2_248 ) < 1e-3
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = self.full_loop(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.1_453 ) < 1e-3
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.0_649 ) < 1e-3
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE__ = scheduler_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
assert sample.dtype == torch.floataa
| 359
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    def __init__( self , prompt: str = None , choices: list = [] ):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '
    def write_choice( self , index , end: str = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end )
        else:
            forceWrite(self.choices[index] , end )
    def print_choice( self , index: int ):
        if index == self.position:
            forceWrite(F' {self.arrow_char} ' )
            self.write_choice(index )
        else:
            forceWrite(F' {self.choices[index]}' )
        reset_cursor()
    def move_direction( self , direction: Direction , num_spaces: int = 1 ):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP['up'] )
    def move_up( self ):
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP['down'] )
    def move_down( self ):
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP['newline'] )
    def select( self ):
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        return self.position
    @input.mark(KEYMAP['interrupt'] )
    def interrupt( self ):
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self ):
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
return
    def run( self , default_choice: int = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '\n' )
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite('\n' )
        move_cursor(len(self.choices ) - self.position , 'UP' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , 'UP' )
                        clear_line()
                    self.write_choice(choice , '\n' )
                    return choice
| 169
| 0
|
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray ) -> np.ndarray:
    """simple docstring"""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
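# Note: these weights are the ITU-R BT.601 luma coefficients commonly used for
# RGB-to-grayscale conversion.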
def gray_to_binary(gray: np.ndarray ) -> np.ndarray:
    """simple docstring"""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray , kernel: np.ndarray ) -> np.ndarray:
    """simple docstring"""
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
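# Behavioural note: an output pixel becomes white (1) whenever the kernel,
# placed over it, overlaps at least one white input pixel; e.g. dilating a
# single white pixel with the 3x3 cross kernel used below also whitens its
# four direct neighbours.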
if __name__ == "__main__":
# read original image
_A : int = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
_A : Tuple = np.array(Image.open(lena_path))
# kernel to be applied
_A : Tuple = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_A : Any = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_A : Union[str, Any] = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
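    # Quick synthetic check (a sketch, no image file needed): dilating a single
    # foreground pixel with the cross-shaped element grows it into a cross.
    tiny = np.zeros((5, 5)) > 0  # all-False binary image
    tiny[2, 2] = True
    print(dilation(tiny, structuring_element).astype(int))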
| 142
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 142
| 1
|
"""Subset-sum: decide whether any subset of `arr` sums to `required_sum`."""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
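    # Usage sketch for the DP above: 9 is reachable (4 + 5), 30 is not, since
    # every subset containing 34 overshoots and the rest sum to at most 26.
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False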
| 362
|
"""Dutch national flag sort: three-way partition a sequence of 0s, 1s and 2s."""

red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
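    # Worked example (a sketch): a single pass, O(n) time, O(1) extra space.
    print(dutch_national_flag_sort([2, 0, 1, 2, 0]))  # [0, 0, 1, 2, 2]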
| 25
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
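# To run these fast tests locally (the file path is an assumption about the
# diffusers repo layout, not taken from this snippet):
#   pytest tests/pipelines/deepfloyd_if/test_if_inpainting.py -q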
| 101
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowercase : str = get_tests_dir('fixtures/vocab.json')
lowercase : int = get_tests_dir('fixtures')
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Dict = WavaVecaFeatureExtractor()
A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : List[Any] = WavaVecaFeatureExtractor()
A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : str = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
# create empty sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write('''{}''' )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A : Tuple = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
A : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A : Optional[int] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A : Tuple = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __lowerCAmelCase ( cls ) -> Dict:
"""simple docstring"""
A : Optional[int] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCAmelCase ( cls ) -> Any:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , )
A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : str = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
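# What the positive-path tests above exercise, in short (a sketch; assumes
# network access to the Hub):
#   processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#   assert type(processor).__name__ == "Wav2Vec2Processor"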
| 3
| 0
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit formed by between 1 and n_limit laminae."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"""{solution() = }""")
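    # Smallest case as a sketch: a 3x3 square with a 1x1 hole uses 8 tiles, so
    # for a small limit the count dictionary starts at t = 8 with one lamina.
    print(f"{solution(100) = }")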
| 138
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
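    # Each Chudnovsky term contributes roughly 14 digits, hence the
    # ceil(precision / 14) iterations above. Quick sanity check (a sketch):
    assert pi(10).startswith("3.14159265")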
| 138
| 1
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
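        # The 16 logits correspond to RVL-CDIP's 16 document categories; a
        # sketch of reading off the prediction via the model's own label map:
        predicted_class = logits.argmax(-1).item()
        print(model.config.id2label[predicted_class])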
| 112
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase_ ( ):
__SCREAMING_SNAKE_CASE : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__SCREAMING_SNAKE_CASE : Any = bs[:]
__SCREAMING_SNAKE_CASE : Tuple = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] ):
__SCREAMING_SNAKE_CASE : Dict = set()
__SCREAMING_SNAKE_CASE : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__SCREAMING_SNAKE_CASE : str = char
return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED, derived from the BART/GPT-2 tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
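    # bpe() always merges the adjacent pair with the lowest merge rank first;
    # e.g. with ranks {("h", "e"): 0, ("he", "l"): 1} the token "hello" becomes
    # "hel l o" after two merges (a sketch with made-up ranks, not real vocab).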
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = """""".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__SCREAMING_SNAKE_CASE : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__SCREAMING_SNAKE_CASE : Tuple = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__SCREAMING_SNAKE_CASE : List[Any] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=False , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : int = """ """ + text
return (text, kwargs)
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super()._pad(
encoded_inputs=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding_strategy=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__SCREAMING_SNAKE_CASE : Tuple = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__SCREAMING_SNAKE_CASE : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__SCREAMING_SNAKE_CASE : str = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCAmelCase__ )
if needs_to_be_padded:
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__SCREAMING_SNAKE_CASE : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 112
| 1
|
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via maximal matching (2-approximation)."""
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of directed edges (from_node, to_node) of the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
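    # Worked example (a sketch): each popped edge contributes both endpoints,
    # so the returned cover is at most twice the size of an optimal one.
    demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(matching_min_vertex_cover(demo_graph))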
| 357
|
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size Gaussian kernel with the given sigma."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
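    # Sanity sketch: the generated kernel is symmetric and peaks at its centre.
    kernel = gen_gaussian_kernel(3, 1)
    print(kernel[1, 1] >= kernel[0, 0])  # True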
| 159
| 0
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive two-pointer linear search; return the index of key or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
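    # Usage sketch: the scan checks both ends of the remaining range each call.
    print(search([1, 3, 5, 7, 9], 7))  # 3
    print(search([1, 3, 5, 7, 9], 4))  # -1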
| 14
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the decoded video."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
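# Usage sketch for the exported pipeline (checkpoint name and a CUDA device
# are assumptions, not verified here):
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
#   frames = pipe("an astronaut riding a horse", num_inference_steps=25).frames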
| 169
| 0
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
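    # Bisection also inverts monotone functions, e.g. sqrt(2) as the root of
    # x**2 - 2 on the bracketing interval [0, 2] (a sketch):
    print(bisection(lambda x: x**2 - 2, 0, 2))  # ~1.4142135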
| 343
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit a polynomial through (1, y_list[0]), (2, y_list[1]), ... and return it."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the best-fit polynomials."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
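    # Worked check (a sketch): for u(n) = n**3 the first incorrect terms of
    # the fitted polynomials are 1, 15 and 58, summing to 74 as in the problem.
    print(solution(lambda n: n**3, 3))  # 74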
| 343
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.1 , _a=0.02 , _a=1e-5 , _a="group" , _a="gelu" , _a=(512, 512, 512, 512, 512, 512, 512) , _a=(5, 2, 2, 2, 2, 2, 2) , _a=(10, 3, 3, 3, 3, 2, 2) , _a=False , _a=128 , _a=16 , _a=320 , _a=800 , _a=False , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=320 , _a=2 , _a=0.1 , _a=100 , _a=256 , _a=256 , _a=0.1 , _a="mean" , _a=False , _a=False , _a=256 , _a=(512, 512, 512, 512, 1500) , _a=(5, 3, 3, 1, 1) , _a=(1, 2, 3, 1, 1) , _a=512 , _a=80 , _a=0 , _a=1 , _a=2 , _a=False , _a=3 , _a=2 , _a=3 , _a=None , **_a , ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
_A : Dict = hidden_size
_A : Any = feat_extract_norm
_A : str = feat_extract_activation
_A : Any = list(SCREAMING_SNAKE_CASE__ )
_A : Any = list(SCREAMING_SNAKE_CASE__ )
_A : List[str] = list(SCREAMING_SNAKE_CASE__ )
_A : List[Any] = conv_bias
_A : Dict = num_buckets
_A : List[str] = max_bucket_distance
_A : Union[str, Any] = num_conv_pos_embeddings
_A : Optional[int] = num_conv_pos_embedding_groups
_A : Optional[Any] = len(self.conv_dim )
_A : Optional[int] = num_hidden_layers
_A : Tuple = intermediate_size
_A : List[str] = hidden_act
_A : Tuple = num_attention_heads
_A : Any = hidden_dropout
_A : int = attention_dropout
_A : List[Any] = activation_dropout
_A : Tuple = feat_proj_dropout
_A : Any = final_dropout
_A : Optional[int] = layerdrop
_A : int = layer_norm_eps
_A : Optional[Any] = initializer_range
_A : Optional[Any] = num_ctc_classes
_A : List[str] = vocab_size
_A : Optional[Any] = do_stable_layer_norm
_A : List[str] = use_weighted_layer_sum
_A : Optional[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_A : Optional[Any] = apply_spec_augment
_A : List[str] = mask_time_prob
_A : Any = mask_time_length
_A : Tuple = mask_time_min_masks
_A : Optional[int] = mask_feature_prob
_A : List[str] = mask_feature_length
# parameters for pretraining with codevector quantized representations
_A : Any = num_codevectors_per_group
_A : str = num_codevector_groups
_A : Tuple = contrastive_logits_temperature
_A : Any = num_negatives
_A : Dict = codevector_dim
_A : List[str] = proj_codevector_dim
_A : Union[str, Any] = diversity_loss_weight
# ctc loss
_A : List[Any] = ctc_loss_reduction
_A : Tuple = ctc_zero_infinity
# adapter
_A : Dict = add_adapter
_A : List[str] = adapter_kernel_size
_A : Optional[int] = adapter_stride
_A : List[str] = num_adapter_layers
_A : List[str] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_A : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_A : List[str] = list(SCREAMING_SNAKE_CASE__ )
_A : int = list(SCREAMING_SNAKE_CASE__ )
_A : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
_A : int = xvector_output_dim
@property
def a__ ( self ) -> List[str]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
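# Usage sketch (assumes the class above is transformers' WavLMConfig):
#   config = WavLMConfig()     # defaults mirror the base-sized WavLM models
#   print(config.hidden_size)  # 768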
| 26
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Multiply two polynomials using the fast Fourier transform."""
def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE__ : int = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE__ : Optional[int] = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return dft[0]
#
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE__ : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE__ : int = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft
SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2
return dft[0]
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" )
SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" )
SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : Any = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__(self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
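    # Usage sketch: multiplying A(x) = 1 + 2x by B(x) = 3 + 4x gives
    # 3 + 10x + 8x^2; str(FFT([1, 2], [3, 4])) prints A, B and the product.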
| 25
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : Any ):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    state_dict = state_dict_for_hugging_face

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
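# For reference, each line of the original entity vocab file is a JSON object of
# the shape consumed above. An illustrative (hypothetical) sample, not a file
# shipped with this script:
#
#   {"id": 0, "entities": [["[MASK]", "en"]]}
#   {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}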
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
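# A minimal sketch of how this converter might be invoked; all paths below are
# illustrative placeholders, not files provided with this script:
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke.bin \
#       --metadata_path ./metadata.json \
#       --entity_vocab_path ./entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base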
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
def reverse_long_words(sentence: str) -> str:
    """Reverse each word of `sentence` that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    digits = list(str(num))
    total = 0
    for digit in digits:
        total += int(digit)
    return total


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
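# Note: the same result can be computed in a single expression, e.g.
#   sum(int(digit) for digit in str(2**1000))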
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are given from the lowest degree term to the highest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
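# A short usage sketch of the class above (values chosen for illustration):
#
#   p = Polynomial(2, [1, 0, 3])  # coefficients from lowest to highest degree: 3x^2 + 1
#   q = Polynomial(1, [0, 2])     # 2x
#   print(p + q)                  # 3x^2 + 2x + 1
#   print(p.evaluate(2))          # 13
#   print(p.derivative())         # 6x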
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
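# Quick sanity checks for the helpers above (values verifiable by hand):
#
#   simple_interest(18000.0, 0.06, 3)    -> 3240.0
#   compound_interest(10000.0, 0.05, 3)  -> ~1576.25  (10000 * (1.05**3 - 1))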
"""simple docstring"""
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#    SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
snake_case_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
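# Illustration of the transformation performed above: inside a block such as
#
#   MODEL_MAPPING_NAMES = OrderedDict(
#       [
#           ("bert", "BertModel"),
#           ("albert", "AlbertModel"),
#       ]
#   )
#
# the entries are re-sorted alphabetically by their quoted identifier
# ("albert" before "bert"); everything outside such blocks is left untouched.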
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
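# The `_LazyModule` indirection above makes `import transformers.models.rembert`
# cheap: the heavy torch/TF modules listed in `_import_structure` are only
# imported on first attribute access. A minimal usage sketch (assumes
# transformers is installed with torch available):
#
#   from transformers import RemBertConfig, RemBertModel
#   model = RemBertModel(RemBertConfig())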
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
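# The helper works for any continuous function with a sign change on [a, b],
# for example (illustrative only):
#
#   bisection(lambda x: x**2 - 4, 0, 10)  # ~2.0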
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute kinetic energy 0.5 * m * v^2; the sign of the velocity is ignored."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
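# Example: a 10 kg mass moving at 5 m/s carries
#   kinetic_energy(10, 5) == 0.5 * 10 * 5 * 5 == 125.0  # joules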
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into an ordered JSON-like structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
'''simple docstring'''
def capitalize_single_chars(txt: str) -> list[str]:
    """Return all variants of `txt` with exactly one alphabetic character uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        data_file, igf_data_file, number=size_objective_set, min_len=1026, trim=trim )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0) )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ), )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored."
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ), )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set", )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq" )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner", )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) " )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ), )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data" )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set" )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner" )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ), )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration", )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )
if __name__ == "__main__":
main()
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """Fallback stub used when vision dependencies are unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
"""simple docstring"""
lowerCamelCase_ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase )
lowerCamelCase_ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase , revision="3dc6de3" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(UpperCamelCase ) , UpperCamelCase , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase )
lowerCamelCase_ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase , revision="3dc6de3" , max_seq_len=50 , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(UpperCamelCase ) , UpperCamelCase , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def snake_case ( self ):
"""simple docstring"""
pass
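# A minimal end-user sketch (an added example mirroring what these tests
# exercise; it is not itself part of the suite):
#
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     outputs = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
#     # -> [{"score": ..., "answer": ..., "start": ..., "end": ...}, ...]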
| 55
| 0
|
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers that have n digits and are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
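# A quick sanity check of the digit-count condition (an added example, not
# part of the original solution): 9**5 = 59049 has exactly 5 digits, so it
# qualifies, while 10**5 = 100000 already has 6 digits -- which is why no
# base >= 10 can ever contribute.
assert len(str(9**5)) == 5
assert len(str(10**5)) == 6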
| 226
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is actually used in the modeling sources."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
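# A small illustration (an added example, not part of the original script) of
# the multi-line `getattr` pattern the regex above is meant to catch:
#
#     snippet = 'value = getattr(\n    self.config,\n    "hidden_size",\n)'
#     pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
#     assert re.search(pattern, snippet) is not None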
def check_config_attributes_being_used(config_class):
    """Return the list of `config_class` attributes that are unused in its modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    """Raise if any configuration class has attributes that are unused in its modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 226
| 1
|
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
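# Equivalent one-liner (an added sketch, not part of the original script):
# sum the decimal digits straight off the string form of 2**power.
def digit_sum(power: int = 1000) -> int:
    return sum(int(digit) for digit in str(2**power))

assert digit_sum(15) == 26  # 2**15 = 32768 -> 3 + 2 + 7 + 6 + 8 = 26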
| 152
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")

def get_hash(example):
    """Get the hash of a code example's content, ignoring all whitespace."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Calculate mean and max line length of a file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}

def alpha_stats(example):
    """Calculate the alphanumeric fraction of a file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}

def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False

def is_autogenerated(example, scan_width=5):
    """Check if a file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}

def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file is a configuration file or a unit test by keyword scan and word counts."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}

def has_no_keywords(example):
    """Check if a Python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}

def has_few_assignments(example, minimum=4):
    """Check if a file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}

def char_token_ratio(example):
    """Compute the character/token ratio of a file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}

def preprocess(example):
    """Chain all preprocessing steps into one function so as not to fill the cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results

def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are removed probabilistically."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True

def compress_file(file_path):
    """Compress a file with gzip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
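# A toy illustration (an added example, not executed as part of the script) of
# the exact-deduplication rule in `check_uniques` above: the first occurrence
# of a hash removes it from the set and is kept; every later copy is dropped.
#
#     demo_uniques = {"a", "b"}
#     demo_stream = [{"hash": "a"}, {"hash": "b"}, {"hash": "a"}]
#     kept = [ex for ex in demo_stream if check_uniques(ex, demo_uniques)]
#     assert [ex["hash"] for ex in kept] == ["a", "b"]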
| 152
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Tuple = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
snake_case : List[str] = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert("RGB" )
return image
def __lowerCAmelCase ( lowercase : List[str] ) -> Dict:
"""simple docstring"""
snake_case : Any = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def __lowerCAmelCase ( lowercase : List[str] , lowercase : List[str] , lowercase : Tuple ) -> Optional[Any]:
"""simple docstring"""
snake_case : Tuple = dct.pop(lowercase )
snake_case : Optional[int] = val
def __lowerCAmelCase ( lowercase : Optional[Any] , lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
snake_case : Any = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
snake_case : int = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
snake_case : Optional[int] = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
snake_case : Union[str, Any] = qkv_bias
def __lowerCAmelCase ( lowercase : List[Any] , lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
snake_case : str = 364 if "coco" in model_name else 224
snake_case : Optional[int] = BlipaVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
snake_case : int = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=lowercase ).to_dict()
elif "opt-6.7b" in model_name:
snake_case : Dict = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=lowercase ).to_dict()
elif "t5-xl" in model_name:
snake_case : Dict = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
snake_case : str = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
snake_case : List[Any] = BlipaConfig(vision_config=lowercase , text_config=lowercase )
return config, image_size
@torch.no_grad()
def __lowerCAmelCase ( lowercase : Any , lowercase : Union[str, Any]=None , lowercase : Optional[Any]=False ) -> str:
"""simple docstring"""
snake_case : int = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
snake_case : Any = tokenizer("\n" , add_special_tokens=lowercase ).input_ids[0]
snake_case ,snake_case : List[str] = get_blipa_config(lowercase , eos_token_id=lowercase )
snake_case : List[str] = BlipaForConditionalGeneration(lowercase ).eval()
snake_case : str = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
snake_case ,snake_case : Union[str, Any] = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
snake_case : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
snake_case ,snake_case ,snake_case : Optional[int] = load_model_and_preprocess(
name=lowercase , model_type=lowercase , is_eval=lowercase , device=lowercase )
original_model.eval()
print("Done!" )
# update state dict keys
snake_case : int = original_model.state_dict()
snake_case : Optional[int] = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case : Optional[int] = state_dict.pop(lowercase )
if key.startswith("Qformer.bert" ):
snake_case : Union[str, Any] = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
snake_case : int = key.replace("self" , "attention" )
if "opt_proj" in key:
snake_case : str = key.replace("opt_proj" , "language_projection" )
if "t5_proj" in key:
snake_case : Tuple = key.replace("t5_proj" , "language_projection" )
if key.startswith("opt" ):
snake_case : Optional[Any] = key.replace("opt" , "language" )
if key.startswith("t5" ):
snake_case : Optional[Any] = key.replace("t5" , "language" )
snake_case : Tuple = val
# read in qv biases
read_in_q_v_bias(lowercase , lowercase )
snake_case ,snake_case : Optional[Any] = hf_model.load_state_dict(lowercase , strict=lowercase )
assert len(lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
snake_case : List[Any] = load_demo_image()
snake_case : List[str] = vis_processors["eval"](lowercase ).unsqueeze(0 ).to(lowercase )
snake_case : Optional[int] = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(lowercase )
# create processor
snake_case : List[Any] = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=lowercase , image_std=lowercase )
snake_case : Optional[int] = BlipaProcessor(image_processor=lowercase , tokenizer=lowercase )
snake_case : List[str] = processor(images=lowercase , return_tensors="pt" ).pixel_values.to(lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase , lowercase )
original_model.to(lowercase )
hf_model.to(lowercase )
with torch.no_grad():
if "opt" in model_name:
snake_case : List[str] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
snake_case : List[str] = hf_model(lowercase , lowercase ).logits
else:
snake_case : int = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
snake_case : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
snake_case : Dict = hf_model(lowercase , lowercase , labels=lowercase ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
snake_case : str = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowercase )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
snake_case : int = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowercase )
else:
# cast to same type
snake_case : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(lowercase ) , lowercase , atol=1e-2 )
print("Looks ok!" )
print("Generating a caption..." )
snake_case : Dict = ""
snake_case : List[Any] = tokenizer(lowercase , return_tensors="pt" ).input_ids.to(lowercase )
snake_case : Any = original_model.generate({"image": original_pixel_values} )
snake_case : Dict = hf_model.generate(
lowercase , lowercase , do_sample=lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , lowercase )
snake_case : Any = input_ids.shape[1]
snake_case : List[Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase )
snake_case : str = [text.strip() for text in output_text]
print("HF generation:" , lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
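# A typical invocation of this conversion script (an assumed example; the
# script file name and the output path are placeholders):
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b \
#         --push_to_hub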
| 112
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__snake_case = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = [file for file in os.listdir(UpperCamelCase__ ) if os.path.isfile(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )]
if identifier is not None:
snake_case : Optional[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for n_ in n_identifier:
snake_case : str = [file for file in files if n_ not in file]
else:
snake_case : str = [file for file in files if n_identifier not in file]
snake_case : Tuple = ignore_files or []
ignore_files.append("__init__.py" )
snake_case : Optional[Any] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , UpperCamelCase__ )
if only_modules:
snake_case : str = file.split("." )[0]
try:
snake_case : Optional[int] = getattr(UpperCamelCase__ , UpperCamelCase__ )
snake_case : str = doctest.DocTestSuite(UpperCamelCase__ )
snake_case : Optional[Any] = unittest.TextTestRunner().run(UpperCamelCase__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
snake_case : Tuple = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Tuple = Path("src/transformers" )
snake_case : List[Any] = "modeling"
snake_case : Dict = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__ , ignore_files=UpperCamelCase__ )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = Path("src/transformers" )
snake_case : Optional[Any] = "tokenization"
self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = Path("src/transformers" )
snake_case : Optional[Any] = "configuration"
self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = Path("src/transformers" )
snake_case : List[str] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(UpperCamelCase__ , n_identifier=UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Any = Path("docs/source" )
snake_case : Tuple = ["favicon.ico"]
self.analyze_directory(UpperCamelCase__ , ignore_files=UpperCamelCase__ , only_modules=UpperCamelCase__ )
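# A minimal, self-contained sketch (an added example) of the per-module path
# above: doctests embedded in docstrings are collected into a unittest suite,
# and the run must report zero failures.
#
#     import doctest, unittest
#
#     def add(a, b):
#         """
#         >>> add(2, 3)
#         5
#         """
#         return a + b
#
#     suite = doctest.DocTestSuite()  # collects doctests from the calling module
#     result = unittest.TextTestRunner().run(suite)
#     assert len(result.failures) == 0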
| 112
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class lowerCamelCase_ :
def __init__( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : list[Any] = []
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : int = 0
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.head == self.tail
def lowercase_ ( self : Dict , _A : Any ):
'''simple docstring'''
self.data.append(_A )
UpperCAmelCase__ : Optional[int] = self.tail + 1
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.data[self.head]
UpperCAmelCase__ : Optional[Any] = self.head + 1
return ret
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return self.tail - self.head
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class lowerCamelCase_ :
def __init__( self : Any , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = data
UpperCAmelCase__ : MyNode | None = None
UpperCAmelCase__ : MyNode | None = None
UpperCAmelCase__ : int = 1
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return self.data
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return self.left
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return self.right
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return self.height
def lowercase_ ( self : Any , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = data
def lowercase_ ( self : List[str] , _A : MyNode | None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = node
def lowercase_ ( self : str , _A : MyNode | None ):
'''simple docstring'''
UpperCAmelCase__ : int = node
def lowercase_ ( self : Optional[int] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = height
def a__ ( lowerCAmelCase__ ) -> int:
if node is None:
return 0
return node.get_height()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
if a > b:
return a
return b
def a__ ( lowerCAmelCase__ ) -> MyNode:
print('''left rotation node:''' , node.get_data() )
UpperCAmelCase__ : Optional[Any] = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase__ )
return ret
def a__ ( lowerCAmelCase__ ) -> MyNode:
print('''right rotation node:''' , node.get_data() )
UpperCAmelCase__ : List[str] = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase__ )
return ret
def a__ ( lowerCAmelCase__ ) -> MyNode:
UpperCAmelCase__ : int = node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowerCAmelCase__ ) )
return right_rotation(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> MyNode:
UpperCAmelCase__ : Union[str, Any] = node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowerCAmelCase__ ) )
return left_rotation(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> MyNode | None:
if node is None:
return MyNode(lowerCAmelCase__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowerCAmelCase__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
UpperCAmelCase__ : Union[str, Any] = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
UpperCAmelCase__ : Tuple = right_rotation(lowerCAmelCase__ )
else:
UpperCAmelCase__ : Any = lr_rotation(lowerCAmelCase__ )
else:
node.set_right(insert_node(node.get_right() , lowerCAmelCase__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
UpperCAmelCase__ : str = node.get_right()
assert right_child is not None
if data < right_child.get_data():
UpperCAmelCase__ : Optional[Any] = rl_rotation(lowerCAmelCase__ )
else:
UpperCAmelCase__ : str = left_rotation(lowerCAmelCase__ )
UpperCAmelCase__ : str = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
return node
def a__ ( lowerCAmelCase__ ) -> Any:
while True:
UpperCAmelCase__ : Optional[int] = root.get_right()
if right_child is None:
break
UpperCAmelCase__ : str = right_child
return root.get_data()
def a__ ( lowerCAmelCase__ ) -> Any:
while True:
UpperCAmelCase__ : List[str] = root.get_left()
if left_child is None:
break
UpperCAmelCase__ : int = left_child
return root.get_data()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> MyNode | None:
UpperCAmelCase__ : Any = root.get_left()
UpperCAmelCase__ : Dict = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
UpperCAmelCase__ : str = get_left_most(lowerCAmelCase__ )
root.set_data(lowerCAmelCase__ )
root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
elif left_child is not None:
UpperCAmelCase__ : int = left_child
elif right_child is not None:
UpperCAmelCase__ : Optional[int] = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('''No such data''' )
return root
else:
root.set_left(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
if get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
UpperCAmelCase__ : Any = left_rotation(lowerCAmelCase__ )
else:
UpperCAmelCase__ : Tuple = rl_rotation(lowerCAmelCase__ )
elif get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
UpperCAmelCase__ : Dict = right_rotation(lowerCAmelCase__ )
else:
UpperCAmelCase__ : Dict = lr_rotation(lowerCAmelCase__ )
UpperCAmelCase__ : int = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowerCAmelCase__ )
return root
class lowerCamelCase_ :
def __init__( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : MyNode | None = None
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return get_height(self.root )
def lowercase_ ( self : Optional[Any] , _A : Any ):
'''simple docstring'''
print('''insert:''' + str(_A ) )
UpperCAmelCase__ : Dict = insert_node(self.root , _A )
def lowercase_ ( self : Optional[Any] , _A : Any ):
'''simple docstring'''
print('''delete:''' + str(_A ) )
if self.root is None:
print('''Tree is empty!''' )
return
UpperCAmelCase__ : Optional[Any] = del_node(self.root , _A )
    def __str__( self : List[str] , ): # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
UpperCAmelCase__ : Dict = ''''''
UpperCAmelCase__ : Any = MyQueue()
q.push(self.root )
UpperCAmelCase__ : str = self.get_height()
if layer == 0:
return output
UpperCAmelCase__ : Optional[int] = 0
while not q.is_empty():
UpperCAmelCase__ : Optional[int] = q.pop()
UpperCAmelCase__ : Optional[Any] = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(_A )
q.push(_A )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
UpperCAmelCase__ : Optional[int] = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , _A ) - 1:
UpperCAmelCase__ : Union[str, Any] = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def a__ ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
UpperCamelCase__ = AVLtree()
UpperCamelCase__ = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
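# A compact, self-contained check (an added example, independent of the class
# above) of the rotation bookkeeping: a left-heavy chain 3 -> 2 -> 1 is fixed
# by promoting the left child, dropping the height from 3 to 2.
class _DemoNode:
    def __init__(self, data):
        self.data, self.left, self.right, self.height = data, None, None, 1

def _demo_height(node):
    return node.height if node else 0

def _demo_rotate(node):  # same shape as the left_rotation above: promote the left child
    ret = node.left
    node.left, ret.right = ret.right, node
    node.height = 1 + max(_demo_height(node.left), _demo_height(node.right))
    ret.height = 1 + max(_demo_height(ret.left), _demo_height(ret.right))
    return ret

_chain = _DemoNode(3)
_chain.left = _DemoNode(2)
_chain.left.left = _DemoNode(1)
_chain.left.height, _chain.height = 2, 3
_root = _demo_rotate(_chain)
assert (_root.data, _root.height) == (2, 2)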
| 181
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the integer list ``a`` in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1

def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
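# Worked trace (an added check): for [8, 3, 2, 7, 4, 6, 8], min_val is 2 and
# size is 7, so the hole counts come out to [1, 1, 1, 0, 1, 1, 2] for the
# values 2..8, which are then written back in order.
_demo = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(_demo)
assert _demo == [2, 3, 4, 6, 7, 8, 8]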
| 181
| 1
|
import string
def decrypt(message: str) -> None:
    """Print every candidate plaintext by trying all 26 Caesar keys."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")

def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
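# A quick worked example (added): shifting "HELLO" forward by 3 gives "KHOOR",
# so brute-forcing "KHOOR" with `decrypt` prints "HELLO" at Key #3:
#
#     >>> decrypt("KHOOR")  # doctest: +SKIP
#     Decryption using Key #0: KHOOR
#     ...
#     Decryption using Key #3: HELLO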
| 306
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration for an NLLB-MoE model; stores all of the model hyperparameters."""
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
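# A small usage sketch (an added example): `router_dtype` is validated eagerly,
# so an unsupported value fails at construction time.
#
#     config = NllbMoeConfig(num_experts=8, router_dtype="bfloat16")
#     assert config.num_experts == 8
#     NllbMoeConfig(router_dtype="int8")  # raises ValueError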
| 306
| 1
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A vertex may take ``color`` only if no already-colored neighbour uses it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )

def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex ``index`` onwards, backtracking on failure."""
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False

def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring with at most ``max_colors`` colors, or [] if impossible."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
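# A worked example (an added check): a triangle needs 3 colors, so asking for
# only 2 yields [], while 3 succeeds with all vertices colored differently.
_triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
assert color(_triangle, 2) == []
assert len(set(color(_triangle, 3))) == 3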
| 10
|
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the decimal digits of ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product

def solution(n: str = N) -> int:
    """Find the greatest product of 13 adjacent digits using a sliding window."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
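# A brute-force cross-check (an added example): score every 13-digit window
# with `str_eval` directly and take the maximum over all of them.
def brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))

# print(f"{brute_force() = }")  # compare against the sliding-window result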
| 264
| 0
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
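# Interpretation (an added note): applied to |000>, the QFT yields an equal
# superposition, so the 10000 shots should spread roughly uniformly over the
# 8 basis states (about 1250 counts each):
#
#     counts = quantum_fourier_transform(3)
#     assert sum(counts.values()) == 10000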
| 352
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _a :
'''simple docstring'''
def __init__( self , A__ , ):
A__ : Any = parent
A__ : Any = 13
A__ : Optional[Any] = 7
A__ : Union[str, Any] = 30
A__ : str = self.seq_length + self.mem_len
A__ : Dict = 15
A__ : int = True
A__ : Tuple = True
A__ : Union[str, Any] = 99
A__ : Optional[Any] = [10, 50, 80]
A__ : str = 32
A__ : Tuple = 32
A__ : Union[str, Any] = 4
A__ : Optional[Any] = 8
A__ : int = 128
A__ : List[Any] = 2
A__ : List[str] = 2
A__ : int = None
A__ : List[str] = 1
A__ : Union[str, Any] = 0
A__ : List[str] = 3
A__ : int = self.vocab_size - 1
A__ : Optional[Any] = 0.0_1
def __A ( self ):
A__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[Any] = None
if self.use_labels:
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Any = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , A__ , A__ , A__ , A__ ):
A__ : Dict = TFTransfoXLModel(A__ )
A__ , A__ : Tuple = model(A__ ).to_tuple()
A__ : List[str] = {"""input_ids""": input_ids_a, """mems""": mems_a}
A__ , A__ : str = model(A__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , A__ , A__ , A__ , A__ ):
A__ : Optional[int] = TFTransfoXLLMHeadModel(A__ )
A__ , A__ : int = model(A__ ).to_tuple()
A__ : int = {"""input_ids""": input_ids_a, """labels""": lm_labels}
A__ , A__ : Optional[Any] = model(A__ ).to_tuple()
A__ , A__ : Union[str, Any] = model([input_ids_a, mems_a] ).to_tuple()
A__ : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
A__ , A__ : Tuple = model(A__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , A__ , A__ , A__ , A__ ):
A__ : Any = TFTransfoXLForSequenceClassification(A__ )
A__ : Optional[Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
A__ : Optional[Any] = self.prepare_config_and_inputs()
((A__) , (A__) , (A__) , (A__)) : List[Any] = config_and_inputs
A__ : int = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class _a (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFTransfoXLModel,
            '''text-classification''': TFTransfoXLForSequenceClassification,
            '''text-generation''': TFTransfoXLLMHeadModel,
            '''zero-shot''': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
def is_pipeline_test_to_skip( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
    if pipeline_test_case_name == "TextGenerationPipelineTests":
        # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
        # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
        # tokenizer.
        return True
    return False
def setUp( self ):
    self.model_tester = TFTransfoXLModelTester(self )
    self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
def test_config( self ):
    self.config_tester.run_common_tests()
def test_transfo_xl_model( self ):
    self.model_tester.set_seed()
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
def test_transfo_xl_lm_head( self ):
    self.model_tester.set_seed()
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
def test_transfo_xl_sequence_classification_model( self ):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
def test_model_common_attributes( self ):
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
    for model_class in self.all_model_classes:
        model = model_class(config )
        assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
        if model_class in list_other_models_with_output_ebd:
            x = model.get_output_embeddings()
            assert isinstance(x , tf.keras.layers.Layer )
            name = model.get_bias()
            assert name is None
        else:
            x = model.get_output_embeddings()
            assert x is None
            name = model.get_bias()
            assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def test_model_from_pretrained( self ):
    for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = TFTransfoXLModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def test_lm_generate_transfo_xl_wt103( self ):
model = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
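# --- Added note: the exact token-id comparison above only works because
# `do_sample=False` makes generation greedy, i.e. a pure argmax over logits.
# A tiny standalone sketch of why greedy decoding is deterministic:
import numpy as np

logits = np.array([[0.1, 2.0, 0.3], [1.5, 0.2, 0.2]])
next_tokens = logits.argmax(axis=-1)   # greedy pick depends only on the logits
assert next_tokens.tolist() == [1, 0]  # so repeated runs yield identical ids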
| 141
| 0
|
def is_power_of_two ( number: int ) -> bool:
    '''Return True when `number` has at most one set bit (note: 0 also passes).'''
    if number < 0:
        raise ValueError('''number must not be negative''' )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
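# --- Added note: the check above relies on the bit trick that `n & (n - 1)`
# clears the lowest set bit, so the result is 0 exactly when `n` has at most
# one set bit. Callers that must reject 0 should special-case it.
for n in (1, 2, 3, 8, 12, 16):
    print(n, n & (n - 1) == 0)  # 1 True, 2 True, 3 False, 8 True, 12 False, 16 True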
| 226
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester ( ConfigTester ):
    '''simple docstring'''
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''width_multiplier''' ) )
class MobileViTVaModelTester :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , hidden_act="swish" , conv_kernel_size=3 , output_stride=32 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , width_multiplier=0.25 , ffn_dropout=0.0 , attn_dropout=0.0 , ):
    '''simple docstring'''
    self.parent = parent
    self.batch_size = batch_size
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    # make_divisible rounds 512 * width_multiplier to the nearest multiple of 8
    self.last_hidden_size = make_divisible(5_12 * width_multiplier , divisor=8 )
    self.hidden_act = hidden_act
    self.conv_kernel_size = conv_kernel_size
    self.output_stride = output_stride
    self.classifier_dropout_prob = classifier_dropout_prob
    self.use_labels = use_labels
    self.is_training = is_training
    self.num_labels = num_labels
    self.initializer_range = initializer_range
    self.scope = scope
    self.width_multiplier = width_multiplier
    self.ffn_dropout = ffn_dropout
    self.attn_dropout = attn_dropout
def prepare_config_and_inputs( self ):
    '''simple docstring'''
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
    labels = None
    pixel_labels = None
    if self.use_labels:
        labels = ids_tensor([self.batch_size] , self.num_labels )
        pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
    config = self.get_config()
    return config, pixel_values, labels, pixel_labels
def get_config( self ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
    '''simple docstring'''
    model = MobileViTVaModel(config=config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values )
    self.parent.assertEqual(
        result.last_hidden_state.shape , (
            self.batch_size,
            self.last_hidden_size,
            self.image_size // self.output_stride,
            self.image_size // self.output_stride,
        ) , )
def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
    '''simple docstring'''
    config.num_labels = self.num_labels
    model = MobileViTVaForImageClassification(config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values , labels=labels )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
    '''simple docstring'''
    config.num_labels = self.num_labels
    model = MobileViTVaForSemanticSegmentation(config )
    model.to(torch_device )
    model.eval()
    result = model(pixel_values )
    self.parent.assertEqual(
        result.logits.shape , (
            self.batch_size,
            self.num_labels,
            self.image_size // self.output_stride,
            self.image_size // self.output_stride,
        ) , )
    result = model(pixel_values , labels=pixel_labels )
    self.parent.assertEqual(
        result.logits.shape , (
            self.batch_size,
            self.num_labels,
            self.image_size // self.output_stride,
            self.image_size // self.output_stride,
        ) , )
def prepare_config_and_inputs_for_common( self ):
    '''simple docstring'''
    config_and_inputs = self.prepare_config_and_inputs()
    config , pixel_values , labels , pixel_labels = config_and_inputs
    inputs_dict = {'''pixel_values''': pixel_values}
    return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def setUp( self ):
    '''simple docstring'''
    self.model_tester = MobileViTVaModelTester(self )
    self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False )
def test_config( self ):
    '''simple docstring'''
    self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def snake_case__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def test_forward_signature( self ):
    '''simple docstring'''
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''pixel_values''']
        self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self ):
    '''simple docstring'''
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs )
def test_hidden_states_output( self ):
    '''simple docstring'''
    def check_hidden_states_output(inputs_dict , config , model_class ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_stages = 5
        self.assertEqual(len(hidden_states ) , expected_num_stages )
        # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
        # with the width and height being successively divided by 2.
        divisor = 2
        for i in range(len(hidden_states ) ):
            self.assertListEqual(
                list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
            divisor *= 2
        self.assertEqual(self.model_tester.output_stride , divisor // 2 )
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        inputs_dict["output_hidden_states"] = True
        check_hidden_states_output(inputs_dict , config , model_class )
        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        check_hidden_states_output(inputs_dict , config , model_class )
def test_for_image_classification( self ):
    '''simple docstring'''
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def test_for_semantic_segmentation( self ):
    '''simple docstring'''
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
    '''simple docstring'''
    for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = MobileViTVaModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
def prepare_img ( ):
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor( self ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head( self ):
    '''simple docstring'''
    model = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
        torch_device )
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs )
    # verify the logits
    expected_shape = torch.Size((1, 10_00) )
    self.assertEqual(outputs.logits.shape , expected_shape )
    expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(torch_device )
    self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_logits , atol=1e-4 ) )
@slow
def test_inference_semantic_segmentation( self ):
    '''simple docstring'''
    model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
    model = model.to(torch_device )
    image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
    image = prepare_img()
    inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs )
    logits = outputs.logits
    # verify the logits
    expected_shape = torch.Size((1, 21, 32, 32) )
    self.assertEqual(logits.shape , expected_shape )
    expected_slice = torch.tensor(
        [
            [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
            [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
            [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
        ] , device=torch_device , )
    self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def test_post_processing_semantic_segmentation( self ):
    '''simple docstring'''
    model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
    model = model.to(torch_device )
    image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
    image = prepare_img()
    inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs )
    outputs.logits = outputs.logits.detach().cpu()
    segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
    expected_shape = torch.Size((50, 60) )
    self.assertEqual(segmentation[0].shape , expected_shape )
    segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
    expected_shape = torch.Size((32, 32) )
    self.assertEqual(segmentation[0].shape , expected_shape )
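# --- Added note: the hidden-state test above halves the feature-map size at
# each of the five stages until the model's output stride is reached. A
# self-contained sketch of that invariant with the tester's default sizes:
image_size, output_stride, num_stages = 64, 32, 5
divisor = 2
stage_sizes = []
for _ in range(num_stages):
    stage_sizes.append(image_size // divisor)
    divisor *= 2
assert output_stride == divisor // 2  # mirrors the assertion in the test
print(stage_sizes)                    # [32, 16, 8, 4, 2]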
| 226
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
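# --- Added note: the `_LazyModule` registration above defers importing the
# heavy modeling code until an attribute is first accessed. A minimal,
# hypothetical sketch of the same idea (not the real transformers class):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first access to `attr`.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")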
| 355
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji ( vocab_file, emoji_file ):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, """r""", encoding="""utf-8""" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, """r""", encoding="""utf-8""" ) as f:
        token = f.readlines()
    token = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[""",""".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class snake_case__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> None:
    super().__init__(
        unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
    if not os.path.isfile(vocab_file ):
        raise ValueError(
            F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
            """ model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
    if not os.path.isfile(emoji_file ):
        raise ValueError(
            F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
            """ pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
    self.do_clean_text = do_clean_text
    self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
    self.subword_tokenizer = SubWordJapaneseTokenizer(
        vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def vocab_size( self ) -> int:
    # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
    return len(self.raw_vocab )
def get_vocab( self ) -> dict:
    return dict(self.raw_vocab , **self.added_tokens_encoder )
def _tokenize( self , text ):
    return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
def _convert_token_to_id( self , token ) -> int:
    return self.vocab.get(token , self.vocab.get(self.unk_token ) )
def _convert_id_to_token( self , index ):
    return self.subword_tokenizer.convert_id_to_token(index )
def convert_tokens_to_string( self , tokens ) -> str:
    out_string = """""".join(tokens ).strip()
    return out_string
def _build_conversation_input_ids( self , conversation ) -> List[int]:
    input_ids = []
    for is_user, text in conversation.iter_texts():
        input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
    if len(input_ids ) > self.model_max_length:
        input_ids = input_ids[-self.model_max_length :]
    return input_ids
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
    index = 0
    if os.path.isdir(save_directory ):
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        emoji_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
    else:
        vocab_file = (
            (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        emoji_file = (
            (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
        )
    with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
        for token_index, token in self.ids_to_tokens.items():
            if index != token_index:
                logger.warning(
                    F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                    """ Please check that the vocabulary is not corrupted!""" )
                index = token_index
            writer.write(""",""".join(token ) + """\n""" )
            index += 1
    with open(emoji_file , """w""" , encoding="""utf-8""" ) as writer:
        json.dump(self.emoji , writer )
    return vocab_file, emoji_file
class SubWordJapaneseTokenizer :
    def __init__( self , vocab , ids_to_tokens , emoji ) -> None:
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
        self.content_repatter2 = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
        self.content_repatter3 = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
        self.content_repatter4 = re.compile(
            R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter5 = re.compile(
            R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter6 = re.compile(
            R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
        keisen = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
        blocks = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
        self.content_trans1 = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self ) -> int:
return len(self.ids_to_tokens )
def clean_text( self , content ) -> str:
    content = self.content_repatter1.sub("""<URL>""" , content )
    content = self.content_repatter2.sub("""<EMAIL>""" , content )
    content = self.content_repatter3.sub("""<TEL>""" , content )
    content = self.content_repatter4.sub("""<DATE>""" , content )
    content = self.content_repatter5.sub("""<DATE>""" , content )
    content = self.content_repatter6.sub("""<PRICE>""" , content )
    content = content.translate(self.content_trans1 )
    while "<BLOCK><BLOCK>" in content:
        content = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
    return content
def tokenize( self , text , clean=False ):
    text = text.replace(""" """ , """<SP>""" )
    text = text.replace("""　""" , """<SP>""" )  # full-width space
    text = text.replace("""\r\n""" , """<BR>""" )
    text = text.replace("""\n""" , """<BR>""" )
    text = text.replace("""\r""" , """<BR>""" )
    text = text.replace("""\t""" , """<TAB>""" )
    text = text.replace("""—""" , """ー""" )
    text = text.replace("""−""" , """ー""" )
    for k, v in self.emoji["emoji"].items():
        if k in text:
            text = text.replace(k , v )
    if clean:
        text = self.clean_text(text )
    def check_simbol(x ):
        e = x.encode()
        if len(x ) == 1 and len(e ) == 2:
            c = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc2_a1 and c <= 0Xc2_bf)
or (c >= 0Xc7_80 and c <= 0Xc7_83)
or (c >= 0Xca_b9 and c <= 0Xcb_bf)
or (c >= 0Xcc_80 and c <= 0Xcd_a2)
):
return True
return False
    def checkuae(x ):
        e = x.encode()
        if len(x ) == 1 and len(e ) == 3:
            c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f:
return True
return False
    pos = 0
    result = []
    while pos < len(text ):
        end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
        candidates = [] # (token_id, token, pos)
        for e in range(end , pos , -1 ):
            wd = text[pos:e]
            if wd in self.vocab:
                if wd[0] == "<" and len(wd ) > 2:
                    candidates = [(self.vocab[wd], wd, e)]
                    break
                else:
                    candidates.append((self.vocab[wd], wd, e) )
        if len(candidates ) > 0:
            # the smallest token_id is adopted
            _ , wd , e = sorted(candidates , key=lambda x: x[0] )[0]
            result.append(wd )
            pos = e
        else:
            end = pos + 1
            wd = text[pos:end]
            if check_simbol(wd ):
                result.append("""<KIGOU>""" )
            elif checkuae(wd ):
                result.append("""<U2000U2BFF>""" )
            else:
                for i in wd.encode("""utf-8""" ):
                    result.append("""<|byte%d|>""" % i )
            pos = end
    return result
def convert_id_to_token( self , index , breakline="\n" ):
    words = []
    byte_tokens = []
    word = self.ids_to_tokens[index][0]
    if word[:6] == "<|byte" and word[-2:] == "|>":
        byte_tokens.append(int(word[6:-2] ) )
    else:
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
            byte_tokens = []
        if word[:7] == "<|emoji" and word[-2:] == "|>":
            words.append(self.emoji["""emoji_inv"""][word] )
        elif word == "<SP>":
            words.append(""" """ )
        elif word == "<BR>":
            words.append(breakline )
        elif word == "<TAB>":
            words.append("""\t""" )
        elif word == "<BLOCK>":
            words.append("""▀""" )
        elif word == "<KIGOU>":
            words.append("""ǀ""" )
        elif word == "<U2000U2BFF>":
            words.append("""‖""" )
        else:
            words.append(word )
    if len(byte_tokens ) > 0:
        words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
    text = """""".join(words )
    return text
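# --- Added note: the candidate search inside `tokenize` above collects every
# vocabulary match that starts at `pos` and adopts the one with the smallest
# token id (not necessarily the longest match). A standalone sketch:
vocab = {"ab": 5, "abc": 2, "a": 9}   # toy vocabulary: token -> id
text, pos, maxlen = "abcd", 0, 3
candidates = []
for e in range(min(len(text), pos + maxlen + 1), pos, -1):
    wd = text[pos:e]
    if wd in vocab:
        candidates.append((vocab[wd], wd, e))
best = sorted(candidates, key=lambda c: c[0])[0]
print(best)  # (2, 'abc', 3) -- the smallest id wins over the shorter "ab"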
| 138
| 0
|
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi ( precision: int ) -> str:
    '''Compute pi to `precision` significant digits with the Chudnovsky algorithm.'''
    if not isinstance(precision , int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 42_68_80 * Decimal(1_00_05 ).sqrt()
    exponential_term = 1
    linear_term = 13_59_14_09
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_45_14_01_34
        exponential_term *= -26_25_37_41_26_40_76_80_00
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(f"The first {n} digits of pi is: {pi(n)}")
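# --- Added note: each Chudnovsky term contributes roughly 14 correct digits,
# which is why the loop above runs ceil(precision / 14) times. A quick sanity
# check against math.pi (assumes the `pi` helper defined above):
import math

approx = pi(50)
assert approx.startswith("3.14159265358979")
assert abs(float(approx) - math.pi) < 1e-13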
| 112
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _UpperCamelCase ( BertTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = DPRContextEncoderTokenizer
class _UpperCamelCase ( BertTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class _UpperCamelCase :
'''simple docstring'''
def __call__( self , questions , titles: Optional[str] = None , texts: Optional[str] = None , padding: Union[bool, str] = False , truncation: Union[bool, str] = False , max_length: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , **kwargs , ) -> BatchEncoding:
    """simple docstring"""
    if titles is None and texts is None:
        return super().__call__(
            questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
    elif titles is None or texts is None:
        text_pair = titles if texts is None else texts
        return super().__call__(
            questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
    titles = titles if not isinstance(titles , str ) else [titles]
    texts = texts if not isinstance(texts , str ) else [texts]
    n_passages = len(titles )
    questions = questions if not isinstance(questions , str ) else [questions] * n_passages
    assert len(titles ) == len(
        texts ), F"There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts."
    encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["""input_ids"""]
    encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["""input_ids"""]
    encoded_inputs = {
        """input_ids""": [
            (encoded_question_and_title + encoded_text)[:max_length]
            if max_length is not None and truncation
            else encoded_question_and_title + encoded_text
            for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
        ]
    }
    if return_attention_mask is not False:
        attention_mask = []
        for input_ids in encoded_inputs["input_ids"]:
            attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
        encoded_inputs["""attention_mask"""] = attention_mask
    return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
def decode_best_spans( self , reader_input: BatchEncoding , reader_output: DPRReaderOutput , num_spans: int = 1_6 , max_answer_length: int = 6_4 , num_spans_per_passage: int = 4 , ):
    """simple docstring"""
    input_ids = reader_input["""input_ids"""]
    start_logits , end_logits , relevance_logits = reader_output[:3]
    n_docs = len(relevance_logits )
    sorted_docs = sorted(range(n_docs ) , reverse=True , key=relevance_logits.__getitem__ )
    nbest_spans_predictions: List[DPRSpanPrediction] = []
    for doc_id in sorted_docs:
        sequence_ids = list(input_ids[doc_id] )
        # assuming question & title information is at the beginning of the sequence
        passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
        if sequence_ids[-1] == self.pad_token_id:
            sequence_len = sequence_ids.index(self.pad_token_id )
        else:
            sequence_len = len(sequence_ids )
        best_spans = self._get_best_spans(
            start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
        for start_index, end_index in best_spans:
            start_index += passage_offset
            end_index += passage_offset
            nbest_spans_predictions.append(
                DPRSpanPrediction(
                    span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
        if len(nbest_spans_predictions ) >= num_spans:
            break
    return nbest_spans_predictions[:num_spans]
def _get_best_spans( self , start_logits: List[int] , end_logits: List[int] , max_answer_length: int , top_spans: int , ):
    """simple docstring"""
    scores = []
    for start_index, start_score in enumerate(start_logits ):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
            scores.append(((start_index, start_index + answer_length), start_score + end_score) )
    scores = sorted(scores , key=lambda x: x[1] , reverse=True )
    chosen_span_intervals = []
    for (start_index, end_index), score in scores:
        assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]"
        length = end_index - start_index + 1
        assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}"
        if any(
            start_index <= prev_start_index <= prev_end_index <= end_index
            or prev_start_index <= start_index <= end_index <= prev_end_index
            for (prev_start_index, prev_end_index) in chosen_span_intervals ):
            continue
        chosen_span_intervals.append((start_index, end_index) )
        if len(chosen_span_intervals ) == top_spans:
            break
    return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class _UpperCamelCase ( _UpperCamelCase , BertTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
model_input_names = ['''input_ids''', '''attention_mask''']
slow_tokenizer_class = DPRReaderTokenizer
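# --- Added note: a hedged usage sketch for the reader tokenizer defined above.
# The checkpoint name is the public `facebook/dpr-reader-single-nq-base` model;
# loading it requires network access to the Hub, so the sketch is kept commented
# out and is illustrative only.
# from transformers import DPRReaderTokenizer
# tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded = tokenizer(
#     questions="What is love?",
#     titles="Haddaway",
#     texts="'What Is Love' is a song recorded by the artist Haddaway",
#     padding=True,
#     return_tensors="pt",
# )   # encoded["input_ids"] has shape (n_passages, sequence_length)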
| 112
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        """negative_prompt""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
        """prompt_embeds""",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """num_images_per_prompt""",
        """callback""",
        """callback_steps""",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case : Any = False
def get_dummy_components( self ):
    torch.manual_seed(0 )
    unet = UNetaDConditionModel(
        block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
    scheduler = DDIMScheduler(
        beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
    torch.manual_seed(0 )
    vae = AutoencoderKL(
        block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , )
    torch.manual_seed(0 )
    text_encoder_config = CLIPTextConfig(
        bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
    text_encoder = CLIPTextModel(text_encoder_config )
    tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
    components = {
        """unet""": unet,
        """scheduler""": scheduler,
        """vqvae""": vae,
        """bert""": text_encoder,
        """tokenizer""": tokenizer,
    }
    return components
def get_dummy_inputs( self , device , seed=0 ):
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        """prompt""": """A painting of a squirrel eating a burger""",
        """generator""": generator,
        """num_inference_steps""": 2,
        """guidance_scale""": 6.0,
        """output_type""": """numpy""",
    }
    return inputs
def test_inference_text2img( self ):
    device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    pipe = LDMTextToImagePipeline(**components )
    pipe.to(device )
    pipe.set_progress_bar_config(disable=None )
    inputs = self.get_dummy_inputs(device )
    image = pipe(**inputs ).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 16, 16, 3)
    expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
    generator = torch.manual_seed(seed )
    latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
    latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
    inputs = {
        """prompt""": """A painting of a squirrel eating a burger""",
        """latents""": latents,
        """generator""": generator,
        """num_inference_steps""": 3,
        """guidance_scale""": 6.0,
        """output_type""": """numpy""",
    }
    return inputs
def test_ldm_default_ddim( self ):
    pipe = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(torch_device )
    pipe.set_progress_bar_config(disable=None )
    inputs = self.get_inputs(torch_device )
    image = pipe(**inputs ).images
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert image.shape == (1, 256, 256, 3)
    expected_slice = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] )
    max_diff = np.abs(expected_slice - image_slice ).max()
    assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
    generator = torch.manual_seed(seed )
    latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
    latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
    inputs = {
        """prompt""": """A painting of a squirrel eating a burger""",
        """latents""": latents,
        """generator""": generator,
        """num_inference_steps""": 50,
        """guidance_scale""": 6.0,
        """output_type""": """numpy""",
    }
    return inputs
def test_ldm_default_ddim( self ):
    pipe = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(torch_device )
    pipe.set_progress_bar_config(disable=None )
    inputs = self.get_inputs(torch_device )
    image = pipe(**inputs ).images[0]
    expected_image = load_numpy(
        """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" )
    max_diff = np.abs(expected_image - image ).max()
    assert max_diff < 1E-3
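# --- Added note: the tests above compare a 3x3 corner slice of the generated
# image against reference values instead of the full array. A minimal sketch
# of that pattern with synthetic data standing in for `pipe(...).images`:
import numpy as np

image = np.zeros((1, 16, 16, 3), dtype=np.float32)
image_slice = image[0, -3:, -3:, -1]              # bottom-right 3x3, last channel
expected_slice = np.zeros(9, dtype=np.float32)    # values from a known-good run
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3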
| 352
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""", # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
    self.vocab_size = vocab_size
    self.d_model = d_model
    self.n_layer = n_layer
    self.n_head = n_head
    if d_model % n_head != 0:
        raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
    if "d_head" in kwargs:
        if kwargs["d_head"] != d_model // n_head:
            raise ValueError(
                f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
    self.d_head = d_model // n_head
    self.ff_activation = ff_activation
    self.d_inner = d_inner
    self.untie_r = untie_r
    self.attn_type = attn_type
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.dropout = dropout
    self.mem_len = mem_len
    self.reuse_len = reuse_len
    self.bi_data = bi_data
    self.clamp_len = clamp_len
    self.same_length = same_length
    self.summary_type = summary_type
    self.summary_use_proj = summary_use_proj
    self.summary_activation = summary_activation
    self.summary_last_dropout = summary_last_dropout
    self.start_n_top = start_n_top
    self.end_n_top = end_n_top
    self.bos_token_id = bos_token_id
    self.pad_token_id = pad_token_id
    self.eos_token_id = eos_token_id
    if "use_cache" in kwargs:
        warnings.warn(
            """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
            """ instead.""" , FutureWarning , )
        use_mems_eval = kwargs["""use_cache"""]
    self.use_mems_eval = use_mems_eval
    self.use_mems_train = use_mems_train
    super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
def _lowerCamelCase ( self ):
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _lowerCamelCase ( self , __lowerCAmelCase ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 87
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the (old_name, new_name) pairs used to rename checkpoint keys."""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'{prefix}cls_token', """beit.embeddings.cls_token"""),
(F'{prefix}patch_embed.proj.weight', """beit.embeddings.patch_embeddings.projection.weight"""),
(F'{prefix}patch_embed.proj.bias', """beit.embeddings.patch_embeddings.projection.bias"""),
(F'{prefix}pos_embed', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split the fused qkv projection of each block into separate q/k/v weights."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download an image of cute cats to verify the converted model on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
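# Example invocation, shown as a comment (the script file name and the output
# path are hypothetical; the checkpoint URL is the default defined above):
#
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base
#
# The script downloads the original checkpoint, renames its keys into the BEiT
# layout, sanity-checks the logits shape on a COCO image, and saves the model
# plus its image processor to the output folder.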
| 306
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
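# A minimal sketch of the determinism pattern exercised above: re-seeding a CPU
# torch.Generator reproduces the exact same frames. Kept as a comment so the
# test module stays import-safe; the model id matches the slow tests, the rest
# is an assumption.
#
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
#   gen = torch.Generator(device="cpu").manual_seed(0)
#   frames_a = pipe("Spiderman is surfing", generator=gen, num_inference_steps=2, output_type="pt").frames
#   gen = torch.Generator(device="cpu").manual_seed(0)
#   frames_b = pipe("Spiderman is surfing", generator=gen, num_inference_steps=2, output_type="pt").frames
#   # identical seed -> identical frames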
| 306
| 1
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
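# Illustration of the ordering check above (hypothetical file contents): if the
# paths read from documentation_tests.txt were ["src/b.py", "src/a.py"], then
# all_paths != sorted(all_paths) would be True and the script would raise the
# "not in alphabetical order" ValueError.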
| 362
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
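# Shape sketch for the assertions above (assumed semantics, inferred from the
# tester's checks): `positions` is (num_recycles, batch, seq_len, 14, 3) --
# 14 atoms per residue with 3 spatial coordinates -- and `angles` is
# (num_recycles, batch, seq_len, 7, 2), i.e. 7 torsion angles encoded as
# (sin, cos) pairs.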
| 103
| 0
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 21
|
'''simple docstring'''
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transforms a snake_case given string to camelCase (or PascalCase if indicated).
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
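# Usage examples (expected outputs shown in comments):
if __name__ == "__main__":
    print(snake_to_camel_case("some_random_string"))                   # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString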
| 141
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
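# Usage sketch (requires the pretrained files; kept as a comment so the module
# stays import-safe):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # Source sentences are wrapped as [src_lang_code] ... [eos] in non-legacy
#   # mode, and `forced_bos_token_id` steers generation to the target language.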
| 57
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 57
| 1
|
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Visual representation of the node and every node that follows it."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence and
    returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
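# Expected output of main() for the example list (a plain illustration of the
# recursion above):
#
#   Linked List:
#   14->52->14->12->43
#   Elements in Reverse:
#   43
#   12
#   14
#   52
#   14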
| 156
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
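# Usage sketch (hypothetical call site): callers gate optional features on the
# pinned version range, e.g.
#
#   dep_version_check("tokenizers", hint="pip install -U tokenizers")
#
# which raises if the installed version falls outside the range pinned in
# dependency_versions_table.py.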
| 138
| 0
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the target-language code."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
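# Usage sketch (comments only; the vocab/spm files come from the checkpoint):
#
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids          # prefix_tokens + tokens + [eos]
#   text = tokenizer.decode(ids, skip_special_tokens=True)
#
# With `lang_codes="mustc"` the tokenizer prepends a `<lang:xx>` id selected
# through the `tgt_lang` property, as implemented in set_tgt_lang_special_tokens.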
| 272
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
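# Import-behavior sketch (assuming the standard transformers lazy-import
# setup): with the _LazyModule above, `from transformers.models.trocr import
# TrOCRForCausalLM` only resolves the modeling submodule on first attribute
# access, so importing the package does not pull in torch unless the modeling
# classes are actually used.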
| 272
| 1
|
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count the reversible numbers of the given length that can still be
    completed from the partially filled `digits`."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
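# Hedged cross-check (not part of the original file): a direct brute force over
# small lengths should agree with `solution`. A number n is reversible exactly
# when n has no trailing zero and n + reverse(n) consists of odd digits only.
def brute_force_solution(max_power: int = 3) -> int:
    count = 0
    for n in range(1, 10**max_power):
        if n % 10 == 0:  # reverse(n) would have a leading zero
            continue
        if all(int(digit) % 2 == 1 for digit in str(n + int(str(n)[::-1]))):
            count += 1
    return count
# e.g. brute_force_solution(3) should equal solution(3): both count the
# reversible numbers below one thousand.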
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type,
            block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Depth-first topological sort over the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
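# Note (added for clarity; traced by hand on the sample graph above): each
# vertex is appended only after all of its descendants, so `sort` is a
# reverse topological order -- here ['c', 'd', 'e', 'b', 'a'] -- and
# `list(reversed(sort))` yields a conventional topological order.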
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle keeping only the previous row in memory."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
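# Usage sketch (values checked by hand for three rows, not part of the
# original file):
#   generate_pascal_triangle(3)            -> [[1], [1, 1], [1, 2, 1]]
#   generate_pascal_triangle_optimized(3)  -> [[1], [1, 1], [1, 2, 1]]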
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
__SCREAMING_SNAKE_CASE : Optional[int] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
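# Example invocation (hypothetical script name and paths -- neither is fixed
# by this snippet):
#   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/model.pt ./m2m100-converted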
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
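# Sketch of the intended quantization-aware flow, assuming a
# QuestionAnsweringTrainer instance named `trainer` built elsewhere in the
# example script (names hypothetical):
#   trainer.calibrate()           # streams `calib_num` samples through the model
#   metrics = trainer.evaluate()  # evaluation with calibrated fake-quant nodes
#   trainer.save_onnx("./out")    # export the quantized graph to ONNX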
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = pa.BufferOutputStream()
__lowerCAmelCase = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=_UpperCamelCase , features=_UpperCamelCase ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
__lowerCAmelCase = pa.BufferReader(output.getvalue() )
__lowerCAmelCase = pa.ipc.open_stream(_UpperCamelCase )
__lowerCAmelCase = f.read_all()
__lowerCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_UpperCamelCase )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_UpperCamelCase , writer_batch_size=_UpperCamelCase , hash_salt="split_name" , check_duplicates=_UpperCamelCase , ) as writer:
with pytest.raises(_UpperCamelCase ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_UpperCamelCase , writer_batch_size=_UpperCamelCase , hash_salt="split_name" , check_duplicates=_UpperCamelCase , ) as writer:
with pytest.raises(_UpperCamelCase ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_UpperCamelCase , writer_batch_size=_UpperCamelCase , hash_salt="split_name" , check_duplicates=_UpperCamelCase , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = pa.BufferOutputStream()
__lowerCAmelCase = pa.schema(_UpperCamelCase ) if fields else None
with ArrowWriter(stream=_UpperCamelCase , schema=_UpperCamelCase , writer_batch_size=_UpperCamelCase ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__lowerCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(_UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = pa.BufferOutputStream()
__lowerCAmelCase = pa.schema(_UpperCamelCase ) if fields else None
with ArrowWriter(stream=_UpperCamelCase , schema=_UpperCamelCase , writer_batch_size=_UpperCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__lowerCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(_UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = pa.BufferOutputStream()
__lowerCAmelCase = pa.schema(_UpperCamelCase ) if fields else None
with ArrowWriter(stream=_UpperCamelCase , schema=_UpperCamelCase , writer_batch_size=_UpperCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__lowerCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(_UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowerCamelCase ( ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = {"col_1": pa.string(), "col_2": pa.intaa()}
__lowerCAmelCase = os.path.join(_UpperCamelCase , "test.arrow" )
with ArrowWriter(path=_UpperCamelCase , schema=pa.schema(_UpperCamelCase ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_UpperCamelCase , metadata=writer._schema.metadata )
_check_output(_UpperCamelCase , 1 )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if pa.types.is_list(_UpperCamelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if isinstance(lst[0] , _UpperCamelCase ):
change_first_primitive_element_in_list(lst[0] , _UpperCamelCase )
else:
__lowerCAmelCase = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = pa.array(TypedSequence(_UpperCamelCase , optimized_int_type=_UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = pa.array(OptimizedTypedSequence(_UpperCamelCase , col=_UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
__lowerCAmelCase = copy.deepcopy(_UpperCamelCase )
__lowerCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = pa.array(OptimizedTypedSequence(_UpperCamelCase , col=_UpperCamelCase ) )
    assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=_UpperCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = "mock://dataset-train.arrow"
with ArrowWriter(path=_UpperCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_UpperCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_UpperCamelCase )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_UpperCamelCase ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
__lowerCAmelCase = pa.BufferReader(output.getvalue() )
__lowerCAmelCase = pq.read_table(_UpperCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
import PIL.Image
__lowerCAmelCase = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_UpperCamelCase , format="png" )
__lowerCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_UpperCamelCase , features=Features({"image": Image()} ) , embed_local_files=_UpperCamelCase ) as writer:
writer.write({"image": image_path} )
writer.finalize()
__lowerCAmelCase = pa.BufferReader(output.getvalue() )
__lowerCAmelCase = pq.read_table(_UpperCamelCase )
__lowerCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , _UpperCamelCase )
with open(_UpperCamelCase , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = pa.schema([pa.field("col_1" , pa.string() , nullable=_UpperCamelCase )] )
__lowerCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_UpperCamelCase ) as writer:
writer._build_writer(inferred_schema=_UpperCamelCase )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
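# Minimal ArrowWriter round-trip mirroring the tests above (in-memory buffer):
#   output = pa.BufferOutputStream()
#   with ArrowWriter(stream=output) as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       num_examples, num_bytes = writer.finalize()
#   assert num_examples == 1 and num_bytes > 0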
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
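# Sanity sketch (not part of the original file): four successive 90-degree
# rotations reproduce the input, since each helper builds fresh lists.
#   m = make_matrix()
#   assert rotate_90(rotate_90(rotate_90(rotate_90(m)))) == m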
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
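# These tests are intended to be collected by pytest from the repository root,
# e.g. (path assumed from the image constants above):
#   python -m pytest digital_image_processing/test_digital_image_processing.py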
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
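# Quick sketch for the helper above: with the cosine transform the betas grow
# toward the end of the schedule and stay capped at max_beta, e.g.
#   betas = betas_for_alpha_bar(10)
#   assert betas.shape == (10,) and float(betas.max()) <= 0.999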
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
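For orientation, a minimal denoising loop with this scheduler could look like the sketch below. The noise-prediction model is a hypothetical stand-in (here replaced by a placeholder tensor so the snippet runs); only the scheduler calls mirror the class above.

# Minimal usage sketch, assuming a noise-prediction model `model(x, t)` exists.
scheduler = KDPM2DiscreteScheduler()
scheduler.set_timesteps(num_inference_steps=25)

sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma  # start from pure noise
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model_input  # placeholder for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample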
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
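For context, `_LazyModule` defers the heavy submodule imports until a symbol is first accessed. A simplified, self-contained sketch of that pattern (illustrative only, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the submodule only when one of its names is first accessed
        for module_name, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{module_name}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")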
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    """Configuration class for an ERNIE-M model."""

    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
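As a quick sanity check, the config can be instantiated and overridden like any PretrainedConfig subclass; the override values below are illustrative, not recommended settings.

config = ErnieMConfig(hidden_size=384, num_hidden_layers=6)  # illustrative overrides
print(config.hidden_size, config.attribute_map["dropout"])   # 384 classifier_dropout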
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
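For reference, the `inputs` property above is what an ONNX exporter consumes. A minimal sketch of inspecting it, assuming the `OnnxConfig(config, task=...)` constructor from transformers.onnx:

config = RobertaConfig()
onnx_config = RobertaOnnxConfig(config, task="default")
print(dict(onnx_config.inputs))
# {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}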
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
UpperCamelCase : List[Any] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = UpperCamelCase  # the dict literal defined above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: returns the longest palindromic substring of
    `input_string` in O(n) time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update l and r to the bounds of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
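A quick illustration of the function above, with outputs shown in comments:

print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg
print(palindromic_string("abacab"))            # bacab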
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a pattern from REPLACE_PATTERNS."""
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects')
        if "legacy" in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')


def global_version_update(version, patch=False):
    """Update the version everywhere it is referenced."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to main docs with stable docs in the model list of the README."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc', 'https://huggingface.co/docs/diffusers/model_doc', )
        index += 1

    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Compute the next release version, confirm with the user, and update files."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Bump the version to the next dev version and update files."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
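To make the REPLACE_PATTERNS mechanics concrete, here is a small, self-contained sketch of the same substitution applied to an in-memory string (the version string is made up):

import re

code = '__version__ = "0.19.0.dev0"\n'
re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
print(re_pattern.sub('__version__ = "0.19.0"', code))  # __version__ = "0.19.0"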
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_validation():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
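For intuition, `map_nested` applies a function to every leaf of a nested list/dict structure. A pure-Python equivalent for the cases exercised above might look like this (simplified sketch, no parallelism):

def map_nested_simple(fn, data):
    # recurse into dicts and lists, apply fn at the leaves
    if isinstance(data, dict):
        return {k: map_nested_simple(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [map_nested_simple(fn, v) for v in data]
    return fn(data)

assert map_nested_simple(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}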
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    apparatus = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, apparatus, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
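The measured bitstring is carry followed by sum (classical bit 1 is printed leftmost in qiskit counts), so the circuit's truth table can be cross-checked classically; a sketch:

def classical_half_adder(bit0: int, bit1: int) -> str:
    carry = bit0 & bit1   # AND -> carry (measured into classical bit 1)
    total = bit0 ^ bit1   # XOR -> sum (measured into classical bit 0)
    return f"{carry}{total}"

assert classical_half_adder(1, 1) == "10"  # matches the dominant count of half_adder(1, 1)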
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    """ArrowBasedBuilder that reads JSON and JSON Lines files."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f'Batch of {len(batch)} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.')
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                                    raise ValueError(f'Not able to read records in the JSON file at {file}.') from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                                raise ValueError(
                                    f'Not able to read records in the JSON file at {file}. '
                                    f'You should probably indicate the field of the JSON file containing your records. '
                                    f'This JSON file contains the following fields: {str(list(dataset.keys()))}. '
                                    f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ') from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
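As a usage sketch (assuming the standard `datasets.load_dataset` entry point, which routes "json" to this builder), the `field` and JSON Lines code paths above correspond to calls like the following; the file names are hypothetical:

from datasets import load_dataset

# one JSON object per line (handled by the chunked pyarrow path)
ds = load_dataset("json", data_files="data.jsonl")
# a single JSON object whose records live under a top-level key
ds = load_dataset("json", data_files="data.json", field="data")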
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
            '''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(entropy_eval_args)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
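Outside of tests, the processor prepares joint text+image batches for the CLIPSeg model. A minimal usage sketch (the checkpoint name is the one published by the CLIPSeg authors; the image is a placeholder):

from PIL import Image
import numpy as np
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))  # placeholder image
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values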
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: List[str] =val
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE_: Tuple =key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
SCREAMING_SNAKE_CASE_: List[str] =value
else:
SCREAMING_SNAKE_CASE_: str =value
return new_state_dict
def __magic_name__ ( lowercase , lowercase=False ):
SCREAMING_SNAKE_CASE_: Optional[int] =""""""
if is_panoptic:
SCREAMING_SNAKE_CASE_: int ="""conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_: Optional[Any] =state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE_: Optional[Any] =state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_: Optional[int] =in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE_: int =in_proj_bias[:256]
SCREAMING_SNAKE_CASE_: Optional[int] =in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE_: Union[str, Any] =in_proj_bias[256:512]
SCREAMING_SNAKE_CASE_: Optional[Any] =in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE_: Any =in_proj_bias[-256:]
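# Added illustration (hedged, not part of the original script): PyTorch's
# nn.MultiheadAttention stores query/key/value as a single fused in_proj
# tensor of shape (3 * hidden_size, hidden_size); the slicing above assumes
# hidden_size == 256 for conditional DETR. `_demo_split_fused_qkv` is a
# hypothetical helper added only to show the split.
def _demo_split_fused_qkv():
    fused_weight = torch.rand(3 * 256, 256)  # stand-in for in_proj_weight
    q, k, v = fused_weight[:256, :], fused_weight[256:512, :], fused_weight[-256:, :]
    assert q.shape == k.shape == v.shape == (256, 256)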
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[int] ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE_: str =Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE_: Dict ="""resnet101"""
if "dc5" in model_name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =True
SCREAMING_SNAKE_CASE_: Any ="""panoptic""" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE_: int =250
else:
SCREAMING_SNAKE_CASE_: Dict =91
SCREAMING_SNAKE_CASE_: Optional[int] ="""huggingface/label-files"""
SCREAMING_SNAKE_CASE_: List[str] ="""coco-detection-id2label.json"""
SCREAMING_SNAKE_CASE_: Optional[Any] =json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE_: Optional[Any] ={int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: List[str] =idalabel
SCREAMING_SNAKE_CASE_: Dict ={v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE_: int ="""coco_panoptic""" if is_panoptic else """coco_detection"""
SCREAMING_SNAKE_CASE_: Optional[int] =ConditionalDetrImageProcessor(format=lowercase )
# prepare image
SCREAMING_SNAKE_CASE_: Optional[int] =prepare_img()
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE_: str =encoding["""pixel_values"""]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
SCREAMING_SNAKE_CASE_: Any =torch.hub.load("""DeppMeng/ConditionalDETR""" , lowercase , pretrained=lowercase ).eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] =conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE_: Dict ="""conditional_detr.""" + src
rename_key(lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =rename_backbone_keys(lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase , is_panoptic=lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE_: int ="""conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
SCREAMING_SNAKE_CASE_: Any =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: List[str] =val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE_: List[str] =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
SCREAMING_SNAKE_CASE_: List[str] =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
SCREAMING_SNAKE_CASE_: Optional[Any] =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE_: List[Any] =ConditionalDetrForSegmentation(lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase )
model.load_state_dict(lowercase )
model.eval()
model.push_to_hub(repo_id=lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
SCREAMING_SNAKE_CASE_: Optional[Any] =conditional_detr(lowercase )
SCREAMING_SNAKE_CASE_: Dict =model(lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_UpperCAmelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
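# Example invocation (added; the script name and output path are hypothetical,
# shown for illustration only):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50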
| 173
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_UpperCAmelCase = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_UpperCAmelCase = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_UpperCAmelCase = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[Any]="binary" , lowerCAmelCase : int=None , lowerCAmelCase : str="warn" , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =recall_score(
lowerCAmelCase , lowerCAmelCase , labels=lowerCAmelCase , pos_label=lowerCAmelCase , average=lowerCAmelCase , sample_weight=lowerCAmelCase , zero_division=lowerCAmelCase , )
return {"recall": float(lowerCAmelCase ) if score.size == 1 else score}
| 173
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=1_8 , __UpperCamelCase=3_0 , __UpperCamelCase=4_0_0 , __UpperCamelCase=True , __UpperCamelCase=3_2 , __UpperCamelCase=True , ):
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size_divisor
UpperCamelCase_ = do_rescale
def lowerCamelCase_ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowercase_ ( _UpperCAmelCase , unittest.TestCase ):
A__ : Any = GLPNImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = GLPNImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size_divisor""" ) )
self.assertTrue(hasattr(lowercase_ , """resample""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
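# Added arithmetic note (hedged, not part of the test): GLPN rounds each
# spatial dimension down to a multiple of `size_divisor`, so with
# size_divisor=32 a 500x700 image becomes 480x672 (500 // 32 * 32 == 480,
# 700 // 32 * 32 == 672), which is exactly what the modulo assertions above check.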
| 371
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase__ ( a__ : Dict , a__ : Dict=None ) -> Union[str, Any]:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
UpperCamelCase_ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
UpperCamelCase_ = requests.get(a__ , headers=a__ ).json()
UpperCamelCase_ = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
UpperCamelCase_ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(a__ ):
UpperCamelCase_ = requests.get(url + f'''&page={i + 2}''' , headers=a__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
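# Added arithmetic example (hedged): with total_count == 250 and 100 jobs per
# page, math.ceil((250 - 100) / 100) == 2 extra pages are fetched, and the
# `&page={i + 2}` offsets above request pages 2 and 3.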
def lowerCamelCase__ ( a__ : Union[str, Any] , a__ : Any=None ) -> Optional[int]:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
UpperCamelCase_ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
UpperCamelCase_ = requests.get(a__ , headers=a__ ).json()
UpperCamelCase_ = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
UpperCamelCase_ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(a__ ):
UpperCamelCase_ = requests.get(url + f'''&page={i + 2}''' , headers=a__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase__ ( a__ : Dict , a__ : Tuple , a__ : Union[str, Any] , a__ : List[Any] ) -> List[Any]:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
UpperCamelCase_ = requests.get(a__ , headers=a__ , allow_redirects=a__ )
UpperCamelCase_ = result.headers["""Location"""]
UpperCamelCase_ = requests.get(a__ , allow_redirects=a__ )
UpperCamelCase_ = os.path.join(a__ , f'''{artifact_name}.zip''' )
with open(a__ , """wb""" ) as fp:
fp.write(response.content )
def lowerCamelCase__ ( a__ : Dict , a__ : Tuple=None ) -> Optional[int]:
UpperCamelCase_ = []
UpperCamelCase_ = []
UpperCamelCase_ = None
with zipfile.ZipFile(a__ ) as z:
for filename in z.namelist():
if not os.path.isdir(a__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(a__ ) as f:
for line in f:
UpperCamelCase_ = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCamelCase_ = line[: line.index(""": """ )]
UpperCamelCase_ = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
UpperCamelCase_ = line[len("""FAILED """ ) :]
failed_tests.append(a__ )
elif filename == "job_name.txt":
UpperCamelCase_ = line
if len(a__ ) != len(a__ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(a__ )} for `errors` '''
f'''and {len(a__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
UpperCamelCase_ = None
if job_name and job_links:
UpperCamelCase_ = job_links.get(a__ , a__ )
# A list with elements of the form (line of error, error, failed test)
UpperCamelCase_ = [x + [y] + [job_link] for x, y in zip(a__ , a__ )]
return result
def lowerCamelCase__ ( a__ : Any , a__ : Union[str, Any]=None ) -> Dict:
UpperCamelCase_ = []
UpperCamelCase_ = [os.path.join(a__ , p ) for p in os.listdir(a__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(a__ , job_links=a__ ) )
return errors
def lowerCamelCase__ ( a__ : Union[str, Any] , a__ : Tuple=None ) -> List[Any]:
UpperCamelCase_ = Counter()
counter.update([x[1] for x in logs] )
UpperCamelCase_ = counter.most_common()
UpperCamelCase_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCamelCase_ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
UpperCamelCase_ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def lowerCamelCase__ ( a__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase_ = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
UpperCamelCase_ = test.split("""/""" )[2]
else:
UpperCamelCase_ = None
return test
def lowerCamelCase__ ( a__ : List[str] , a__ : Optional[int]=None ) -> Dict:
UpperCamelCase_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
UpperCamelCase_ = [x for x in logs if x[2] is not None]
UpperCamelCase_ = {x[2] for x in logs}
UpperCamelCase_ = {}
for test in tests:
UpperCamelCase_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
UpperCamelCase_ = counter.most_common()
UpperCamelCase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCamelCase_ = sum(error_counts.values() )
if n_errors > 0:
UpperCamelCase_ = {"""count""": n_errors, """errors""": error_counts}
UpperCamelCase_ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def lowerCamelCase__ ( a__ : Any ) -> List[Any]:
UpperCamelCase_ = """| no. | error | status |"""
UpperCamelCase_ = """|-:|:-|:-|"""
UpperCamelCase_ = [header, sep]
for error in reduced_by_error:
UpperCamelCase_ = reduced_by_error[error]["""count"""]
UpperCamelCase_ = f'''| {count} | {error[:100]} | |'''
lines.append(a__ )
return "\n".join(a__ )
def lowerCamelCase__ ( a__ : Optional[int] ) -> str:
UpperCamelCase_ = """| model | no. of errors | major error | count |"""
UpperCamelCase_ = """|-:|-:|-:|-:|"""
UpperCamelCase_ = [header, sep]
for model in reduced_by_model:
UpperCamelCase_ = reduced_by_model[model]["""count"""]
UpperCamelCase_ , UpperCamelCase_ = list(reduced_by_model[model]["""errors"""].items() )[0]
UpperCamelCase_ = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(a__ )
return "\n".join(a__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
_A = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_A = get_job_links(args.workflow_run_id, token=args.token)
_A = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_A = k.find(''' / ''')
_A = k[index + len(''' / ''') :]
_A = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_A = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_A = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_A = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_A = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_A = reduce_by_error(errors)
_A = reduce_by_model(errors)
_A = make_github_table(reduced_by_error)
_A = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 261
| 0
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''data2vec-audio'''
def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_6 , lowercase=1_9 , lowercase=5 , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A_ : str = hidden_size
A_ : Optional[int] = feat_extract_activation
A_ : Any = list(lowercase )
A_ : Tuple = list(lowercase )
A_ : Dict = list(lowercase )
A_ : List[Any] = conv_bias
A_ : str = num_conv_pos_embeddings
A_ : int = num_conv_pos_embedding_groups
A_ : Optional[Any] = conv_pos_kernel_size
A_ : int = len(self.conv_dim )
A_ : Any = num_hidden_layers
A_ : str = intermediate_size
A_ : Optional[int] = hidden_act
A_ : Any = num_attention_heads
A_ : Optional[int] = hidden_dropout
A_ : Union[str, Any] = attention_dropout
A_ : Any = activation_dropout
A_ : int = feat_proj_dropout
A_ : List[str] = final_dropout
A_ : List[Any] = layerdrop
A_ : Optional[int] = layer_norm_eps
A_ : Tuple = initializer_range
A_ : Optional[int] = vocab_size
A_ : int = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : Optional[int] = mask_time_prob
A_ : Dict = mask_time_length
A_ : List[str] = mask_time_min_masks
A_ : List[str] = mask_feature_prob
A_ : Dict = mask_feature_length
A_ : List[Any] = mask_feature_min_masks
# ctc loss
A_ : Dict = ctc_loss_reduction
A_ : Dict = ctc_zero_infinity
# adapter
A_ : List[str] = add_adapter
A_ : List[Any] = adapter_kernel_size
A_ : List[str] = adapter_stride
A_ : Dict = num_adapter_layers
A_ : Any = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A_ : Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A_ : Optional[Any] = list(lowercase )
A_ : Dict = list(lowercase )
A_ : Tuple = list(lowercase )
A_ : int = xvector_output_dim
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return math.prod(self.conv_stride )
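# Added sketch (hedged): with the default conv_stride (5, 2, 2, 2, 2, 2, 2)
# the property above returns 5 * 2**6 == 320, i.e. one encoder frame per
# 320 input samples, which is 20 ms of audio at 16 kHz. `_demo_...` is a
# hypothetical helper added only to show the arithmetic.
def _demo_inputs_to_logits_ratio():
    assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320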
| 140
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''time_series_transformer'''
lowerCamelCase_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , lowercase = None , lowercase = None , lowercase = "student_t" , lowercase = "nll" , lowercase = 1 , lowercase = [1, 2, 3, 4, 5, 6, 7] , lowercase = "mean" , lowercase = 0 , lowercase = 0 , lowercase = 0 , lowercase = 0 , lowercase = None , lowercase = None , lowercase = 3_2 , lowercase = 3_2 , lowercase = 2 , lowercase = 2 , lowercase = 2 , lowercase = 2 , lowercase = True , lowercase = "gelu" , lowercase = 6_4 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 1_0_0 , lowercase = 0.02 , lowercase=True , **lowercase , ):
"""simple docstring"""
A_ : Tuple = prediction_length
A_ : Any = context_length or prediction_length
A_ : Any = distribution_output
A_ : Dict = loss
A_ : int = input_size
A_ : Any = num_time_features
A_ : List[str] = lags_sequence
A_ : List[Any] = scaling
A_ : str = num_dynamic_real_features
A_ : List[Any] = num_static_real_features
A_ : Optional[int] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowercase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
A_ : Any = cardinality
else:
A_ : Any = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowercase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
A_ : int = embedding_dimension
else:
A_ : Tuple = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
A_ : Any = input_size * len(lowercase ) + self._number_of_features
A_ : List[str] = d_model
A_ : Union[str, Any] = encoder_attention_heads
A_ : int = decoder_attention_heads
A_ : int = encoder_ffn_dim
A_ : str = decoder_ffn_dim
A_ : Tuple = encoder_layers
A_ : Tuple = decoder_layers
A_ : List[str] = dropout
A_ : str = attention_dropout
A_ : List[Any] = activation_dropout
A_ : List[Any] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : Optional[int] = activation_function
A_ : str = init_std
A_ : int = use_cache
super().__init__(is_encoder_decoder=lowercase , **lowercase )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
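# Added arithmetic example (hedged): with the defaults above (no static,
# dynamic or time features, input_size=1, seven lags) the property returns
# 0 + 0 + 0 + 0 + 1 * 2 == 2 (the loc/scale features), so the transformer's
# input feature size is input_size * len(lags_sequence) + 2 == 9.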
| 140
| 1
|
# Lint as: python3
import itertools
import os
import re
UpperCamelCase = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
UpperCamelCase = re.compile(R'''([a-z\d])([A-Z])''')
UpperCamelCase = re.compile(R'''(?<!_)_(?!_)''')
UpperCamelCase = re.compile(R'''(_{2,})''')
UpperCamelCase = R'''^\w+(\.\w+)*$'''
UpperCamelCase = R'''<>:/\|?*'''
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = _uppercase_uppercase_re.sub(r"""\1_\2""" ,snake_case__ )
_SCREAMING_SNAKE_CASE = _lowercase_uppercase_re.sub(r"""\1_\2""" ,snake_case__ )
return name.lower()
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = _single_underscore_re.split(snake_case__ )
_SCREAMING_SNAKE_CASE = [_multiple_underscores_re.split(snake_case__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(snake_case__ ) if n != """""" )
def __lowerCamelCase ( snake_case__ ) -> Optional[Any]:
"""simple docstring"""
if os.path.basename(snake_case__ ) != name:
raise ValueError(F'Should be a dataset name, not a path: {name}' )
return camelcase_to_snakecase(snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Any:
"""simple docstring"""
if os.path.basename(snake_case__ ) != name:
raise ValueError(F'Should be a dataset name, not a path: {name}' )
if not re.match(_split_re ,snake_case__ ):
raise ValueError(F'Split name should match \'{_split_re}\' but got \'{split}\'.' )
return F'{filename_prefix_for_name(snake_case__ )}-{split}'
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = filename_prefix_for_split(snake_case__ ,snake_case__ )
if filetype_suffix:
prefix += F'.{filetype_suffix}'
_SCREAMING_SNAKE_CASE = os.path.join(snake_case__ ,snake_case__ )
return F'{filepath}*'
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__=None ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = filename_prefix_for_split(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = os.path.join(snake_case__ ,snake_case__ )
if shard_lengths:
_SCREAMING_SNAKE_CASE = len(snake_case__ )
_SCREAMING_SNAKE_CASE = [F'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(snake_case__ )]
if filetype_suffix:
_SCREAMING_SNAKE_CASE = [filename + F'.{filetype_suffix}' for filename in filenames]
return filenames
else:
_SCREAMING_SNAKE_CASE = prefix
if filetype_suffix:
filename += F'.{filetype_suffix}'
return [filename]
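# Added usage sketch (hedged; the names follow the un-obfuscated `datasets`
# library, since the defs above have been renamed):
#   filenames_for_dataset_split("/data", "my_dataset", "train",
#                               filetype_suffix="arrow", shard_lengths=[10, 10])
#   # -> ['/data/my_dataset-train-00000-of-00002.arrow',
#   #     '/data/my_dataset-train-00001-of-00002.arrow']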
| 125
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 125
| 1
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 600851475143 ) -> Tuple:
try:
_a : Tuple =int(__snake_case )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
_a : List[Any] =2
_a : str =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_a : str =i
while n % i == 0:
_a : List[Any] =n // i
i += 1
return int(__snake_case )
if __name__ == "__main__":
print(F"{solution() = }")
| 276
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__UpperCAmelCase = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__UpperCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
if "://" in dataset_path:
UpperCAmelCase_ : int = dataset_path.split('://' )[1]
return dataset_path
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) )
else:
fs.mv(__snake_case , __snake_case , recursive=__snake_case )
def lowercase__ ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = threading.Lock()
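# Added usage sketch (hedged; the names follow the un-obfuscated `datasets`
# source):
#   extract_path_from_uri("s3://bucket/dataset")     # -> "bucket/dataset"
#   is_remote_filesystem(fsspec.filesystem("file"))  # -> False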
| 29
| 0
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
lowerCAmelCase__ = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
lowerCAmelCase__ , lowerCAmelCase__ = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowerCAmelCase__ = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
assert base_extractor.is_extractable(lowerCAmelCase__ )
lowerCAmelCase__ = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(lowerCAmelCase__ , lowerCAmelCase__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowerCAmelCase__ = file_path.read_text(encoding='utf-8' )
else:
lowerCAmelCase__ = output_path.read_text(encoding='utf-8' )
lowerCAmelCase__ = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
lowerCAmelCase__ = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
lowerCAmelCase__ = input_paths[compression_format]
if input_path is None:
lowerCAmelCase__ = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
lowerCAmelCase__ = Extractor.infer_extractor_format(lowerCAmelCase__ )
assert extractor_format is not None
lowerCAmelCase__ = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowerCAmelCase__ = file_path.read_text(encoding='utf-8' )
else:
lowerCAmelCase__ = output_path.read_text(encoding='utf-8' )
lowerCAmelCase__ = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
import tarfile
lowerCAmelCase__ = tmp_path / 'data_dot_dot'
directory.mkdir()
lowerCAmelCase__ = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(lowerCAmelCase__ , 'w' ) as f:
f.add(lowerCAmelCase__ , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def __lowerCamelCase ( lowerCAmelCase__ ):
import tarfile
lowerCAmelCase__ = tmp_path / 'data_sym_link'
directory.mkdir()
lowerCAmelCase__ = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=lowerCAmelCase__ )
with tarfile.TarFile(lowerCAmelCase__ , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
lowerCAmelCase__ = insecure_tar_files[insecure_tar_file]
lowerCAmelCase__ = tmp_path / 'extracted'
TarExtractor.extract(lowerCAmelCase__ , lowerCAmelCase__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCamelCase ( lowerCAmelCase__ ):
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
lowerCAmelCase__ = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
lowerCAmelCase__ = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(lowerCAmelCase__ )
assert zipfile.is_zipfile(str(lowerCAmelCase__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCAmelCase__ ) # but we're right
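# Added note (hedged): zipfile.is_zipfile scans for the end-of-central-
# directory record (b"PK\x05\x06") anywhere in the file, which the PNG bytes
# above deliberately contain, while the extractor presumably checks only the
# leading magic number b"PK\x03\x04", so it correctly rejects the file.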
| 119
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class a_ :
'''simple docstring'''
UpperCAmelCase_ = None
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = None
UpperCAmelCase_ = 1
UpperCAmelCase_ = None
UpperCAmelCase_ = False
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def __snake_case ( self : Dict):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 119
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _a ( unittest.TestCase ):
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=13 , UpperCamelCase_: int=7 , UpperCamelCase_: int=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[int]=99 , UpperCamelCase_: Tuple=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: int=4 , UpperCamelCase_: List[str]=37 , UpperCamelCase_: Tuple="gelu" , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Union[str, Any]=512 , UpperCamelCase_: Tuple=16 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: List[Any]=4 , ) -> List[str]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def lowerCamelCase_ ( self: Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self: Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_and_inputs
lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_and_inputs
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _a ( lowerCamelCase__ , unittest.TestCase ):
_lowercase : str = True
_lowercase : Optional[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self: Tuple ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCamelCase_ )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
| 110
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *snake_case :Tuple , **snake_case :Any ):
'''simple docstring'''
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , snake_case , )
super().__init__(*snake_case , **snake_case )
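# Added usage note (hedged): instantiating the shim emits the deprecation
# warning once and otherwise behaves exactly like YolosImageProcessor:
#   extractor = YolosFeatureExtractor()  # warns, then delegates to the parent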
| 300
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
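        # Worked example with the defaults above (a sketch, not part of the original test):
        # frequency_out_dimension = (16 - 2) // 2 + 1 = 8
        # time_out_dimension = (24 - 2) // 2 + 1 = 12
        # num_patches = 8 * 12 = 96, so seq_length = 96 + 2 = 98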
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # assumed: the obfuscated source lost this literal
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_SCREAMING_SNAKE_CASE = model.get_bias()
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
for k, v in name.items():
assert isinstance(UpperCAmelCase_ , tf.Variable )
else:
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
assert x is None
_SCREAMING_SNAKE_CASE = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, we want the underlying dataset to yield a random number of samples.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
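# Reproducibility sketch (not part of the original tests): because __iter__ draws from the
# global `random` module, re-seeding before iteration replays the same random-length stream,
# which is exactly what check_iterable_dataset_shards below relies on.
#
# random.seed(42)
# first = list(RandomIterableDataset(p_stop=0.1, max_length=10))
# random.seed(42)
# assert list(RandomIterableDataset(p_stop=0.1, max_length=10)) == first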
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
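# Usage sketch (hypothetical subclass; the method names above were reconstructed, so treat
# this as illustrative rather than canonical):
#
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
#
# RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8}) -> "run_lr0.0001"
#   ("learning_rate" shortens to "lr"; batch_size is dropped because it equals its default)
# RunNamer.parse_repr("run_lr0.0001") -> {"learning_rate": 0.0001, "batch_size": 8}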
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
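# Invocation sketch (entry-point path assumed; the argparse wiring above exposes three commands):
#   python path/to/this/file            # interactive config wizard
#   python path/to/this/file default    # write a default config file
#   python path/to/this/file update     # migrate an existing config file to the latest format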
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because PyTorch Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
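# Invocation sketch (script filename and paths are placeholders, not verified):
# python convert_longformer_qa_checkpoint.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./lightning_model.ckpt \
#     --pytorch_dump_folder_path ./longformer_for_qa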
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
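# Worked check (sketch): a polarizer at 60 degrees passes a quarter of the light,
# since cos(60 degrees) ** 2 == 0.25, so malus_law(100.0, 60.0) ~= 25.0.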
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
pass
@unittest.skip
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''ESMFold only has one output format.''' )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def UpperCAmelCase__ ( self : Any ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
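# Minimal usage sketch (toy values for illustration, not a released checkpoint's config):
# config = FNetConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2)
# assert config.model_type == "fnet" and config.tpu_short_seq_length == 512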
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
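# Worked check (sketch): multiples of 3 or 5 below 10 are 3, 5, 6 and 9, so solution(10) == 23.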
if __name__ == "__main__":
print(f"""{solution() = }""")
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
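# Worked example (assumed, typical silicon numbers at T = 300 K, concentrations in cm^-3):
# builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
#   = (kT/q) * ln(Nd * Na / ni^2) ~= 0.02585 * ln(1e14) ~= 0.833 V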
if __name__ == "__main__":
import doctest
doctest.testmod()
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch,
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 220
|
'''simple docstring'''
def UpperCamelCase__ ( string_a , string_b ):
    """simple docstring"""
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
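# A small sanity check on hypothetical inputs: "karolin" and "kathrin" differ in
# exactly three positions, so their Hamming distance is 3.
#
#     >>> UpperCamelCase__("karolin", "kathrin")
#     3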
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220
| 1
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_cpu( self ):
        debug_launcher(test_script.main )
    def test_ops( self ):
        debug_launcher(test_ops.main )
| 45
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 125
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , "Tatoeba directory does not exist." )
class TatoebaConversionTester(unittest.TestCase ):
    @cached_property
    def resolver( self ) -> TatoebaConverter:
        '''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
    @slow
    def test_resolver( self ) -> None:
        '''simple docstring'''
        self.resolver.convert_models(["""heb-eng"""] )
    @slow
    def test_model_card( self ) -> None:
        '''simple docstring'''
        content, mmeta = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 159
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp( self ) -> None:
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut( self ) -> None:
        '''simple docstring'''
        processor = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_local_directory_from_repo( self ) -> None:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            processor.save_pretrained(tmpdirname )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_local_directory_from_extractor_config( self ) -> None:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG , os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) )
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , """vocab.json""" ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_feat_extr_processor_class( self ) -> None:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            processor = Wav2Vec2Processor(feature_extractor , tokenizer )
            # save in new folder
            processor.save_pretrained(tmpdirname )
            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , """r""" ) as f:
                config_dict = json.load(f )
            config_dict.pop("""processor_class""" )
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , """w""" ) as f:
                f.write(json.dumps(config_dict ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_tokenizer_processor_class( self ) -> None:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            processor = Wav2Vec2Processor(feature_extractor , tokenizer )
            # save in new folder
            processor.save_pretrained(tmpdirname )
            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , """r""" ) as f:
                config_dict = json.load(f )
            config_dict.pop("""processor_class""" )
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , """w""" ) as f:
                f.write(json.dumps(config_dict ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_local_directory_from_model_config( self ) -> None:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="""Wav2Vec2Processor""" )
            model_config.save_pretrained(tmpdirname )
            # copy relevant files
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , """vocab.json""" ) )
            # create empty sample processor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , """w""" ) as f:
                f.write("""{}""" )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_from_pretrained_dynamic_processor( self ) -> None:
        '''simple docstring'''
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            processor = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            processor = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=False )
        processor = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=True )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=True , use_fast=False )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
    def test_new_processor_registration( self ) -> None:
        '''simple docstring'''
        try:
            AutoConfig.register("""custom""" , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            AutoProcessor.register(CustomConfig , CustomProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoProcessor.register(Wav2Vec2Config , Wav2Vec2Processor )
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir , """vocab.txt""" )
                with open(vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
                    vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
                tokenizer = CustomTokenizer(vocab_file )
                processor = CustomProcessor(feature_extractor , tokenizer )
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir )
                new_processor = AutoProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_processor , CustomProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict( self ) -> None:
        '''simple docstring'''
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor ):
            special_attribute_present = False
        class NewTokenizer(BertTokenizer ):
            special_attribute_present = False
        class NewProcessor(ProcessorMixin ):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False
        try:
            AutoConfig.register("""custom""" , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoProcessor.register(CustomConfig , NewProcessor )
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=False )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=True )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer( self ) -> None:
        '''simple docstring'''
        processor = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
    def test_auto_processor_creates_image_processor( self ) -> None:
        '''simple docstring'''
        processor = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
        self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass( cls ) -> None:
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ) -> None:
        '''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
    def test_push_to_hub( self ) -> None:
        '''simple docstring'''
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , """test-processor""" ) , push_to_hub=True , use_auth_token=self._token )
            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def test_push_to_hub_in_organization( self ) -> None:
        '''simple docstring'''
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , """test-processor-org""" ) , push_to_hub=True , use_auth_token=self._token , organization="""valid_org""" , )
            new_processor = Wav2Vec2Processor.from_pretrained("""valid_org/test-processor-org""" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def test_push_to_hub_dynamic_processor( self ) -> None:
        '''simple docstring'''
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , """vocab.txt""" )
            with open(vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
            processor = CustomProcessor(feature_extractor , tokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor" , token=self._token )
            repo = Repository(tmp_dir , clone_from=f"{USER}/test-dynamic-processor" , token=self._token )
            processor.save_pretrained(tmp_dir )
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map , {
                    """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
                    """AutoProcessor""": """custom_processing.CustomProcessor""",
                } , )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir , """tokenizer_config.json""" ) ) as f:
                tokenizer_config = json.load(f )
            self.assertDictEqual(
                tokenizer_config["""auto_map"""] , {
                    """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
                    """AutoProcessor""": """custom_processing.CustomProcessor""",
                } , )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , """custom_feature_extraction.py""" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , """custom_tokenization.py""" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , """custom_processing.py""" ) ) )
            repo.push_to_hub()
        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor" , trust_remote_code=True )
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 159
| 1
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
def load_and_quantize_model( model: torch.nn.Module , bnb_quantization_config: BnbQuantizationConfig , weights_location: Union[str, os.PathLike] = None , device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None , no_split_module_classes: Optional[List[str]] = None , max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None , offload_folder: Optional[Union[str, os.PathLike]] = None , offload_state_dict: bool = False , ):
    '''simple docstring'''
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed." )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager." )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    module_name = name.replace(".weight" , "" ).replace(".bias" , "" )
                    param = getattr(model , module_name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info(
            f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
            "We move the model to cuda." )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
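# A minimal usage sketch (assumptions: bitsandbytes is installed, a CUDA GPU is
# available, and `MyModel`/the checkpoint path are hypothetical placeholders):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#     with init_empty_weights():
#         empty_model = MyModel(my_config)
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     quantized_model = load_and_quantize_model(
#         empty_model, bnb_config, weights_location="path/to/checkpoint"
#     )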
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
    '''simple docstring'''
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'." )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == "balanced_low_0") , max_memory=max_memory , **kwargs , )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    '''simple docstring'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    '''simple docstring'''
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , "base_model_prefix" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , "" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    '''simple docstring'''
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter: nn.Module ):
    '''simple docstring'''
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    '''simple docstring'''
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split("." )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f'{module} has no attribute {split}.' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , "SCB" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace("weight" , "SCB" ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , "meta" , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 85
|
def solution( length = 50 ) -> int:
    '''simple docstring'''
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
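# Worked example: this is the Project Euler 117-style tiling count, filling a row
# of `length` units with grey squares and coloured tiles of length 2, 3 or 4.
# For a row of five units the recurrence yields solution(5) == 15, matching the
# count given in the problem statement.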
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 0
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
    def test_cpu( self ):
        """simple docstring"""
        debug_launcher(test_script.main )
    def test_ops( self ):
        """simple docstring"""
        debug_launcher(test_ops.main )
| 309
|
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]] , vert: int , visited: list[bool] ):
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components(reversed_graph: dict[int, list[int]] , vert: int , visited: list[bool] ):
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components(graph: dict[int, list[int]] ):
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
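# A quick sanity check on the first sample graph: the strongly connected
# components of test_graph_1 are {0, 1, 2} (the cycle 0 -> 2 -> 1 -> 0), {3}
# and {4}; with this traversal order the call returns them as
#
#     strongly_connected_components(test_graph_1)  # [[0, 1, 2], [3], [4]]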
| 309
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """simple docstring"""
    def __init__( self,parent,batch_size=13,image_size=32,num_channels=3,num_stages=4,hidden_sizes=[10, 20, 30, 40],depths=[2, 2, 3, 2],is_training=True,use_labels=True,intermediate_size=37,hidden_act="gelu",type_sequence_label_size=10,initializer_range=0.02,out_features=["stage2", "stage3", "stage4"],num_labels=3,scope=None,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size],self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels,num_stages=self.num_stages,hidden_sizes=self.hidden_sizes,depths=self.depths,is_training=self.is_training,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,out_features=self.out_features,)
    def get_config( self ):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),hidden_size=512,pool_scales=[1, 2, 3, 6],use_auxiliary_head=True,auxiliary_loss_weight=0.4,auxiliary_in_channels=40,auxiliary_channels=256,auxiliary_num_convs=1,auxiliary_concat_input=False,loss_ignore_index=255,num_labels=self.num_labels,)
    def create_and_check_for_semantic_segmentation( self,config,pixel_values,labels ):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self,config_class=UperNetConfig,has_text_modality=False,hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1],expected_arg_names )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @unittest.skip(reason='UperNet does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='UperNet does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='UperNet does not have a base model' )
    def test_save_load_fast_init_from_base( self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='UperNet does not have a base model' )
    def test_save_load_fast_init_to_base( self ):
        '''simple docstring'''
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def test_multi_gpu_data_parallel_forward( self ):
        '''simple docstring'''
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small( self ):
        '''simple docstring'''
        pass
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict,config,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict,model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ),expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict,config,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict,config,model_class )
    def test_initialization( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(),[0.0, 1.0],msg=F'Parameter {name} of model {model_class} seems not properly initialized',)
    @unittest.skip(reason='UperNet does not have tied weights' )
    def test_tied_model_weights_key_ignore( self ):
        '''simple docstring'''
        pass
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
    image = Image.open(filepath ).convert('RGB' )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    def test_inference_swin_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image,return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape,expected_shape )
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3],expected_slice,atol=1E-4 ) )
    def test_inference_convnext_backbone( self ):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image,return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape,expected_shape )
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3],expected_slice,atol=1E-4 ) )
| 7
|
def valid_coloring( neighbours , colored_vertices , color ) -> bool:
    '''simple docstring'''
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph , max_colors , colored_vertices , index ) -> bool:
    '''simple docstring'''
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph , max_colors ) -> list[int]:
    '''simple docstring'''
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
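# A minimal usage sketch: 3-coloring a triangle given as an adjacency matrix.
# Each vertex needs a distinct color, so with 3 colors a valid assignment is
# found, while with 2 colors the backtracking search fails and [] is returned.
#
#     graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#     assert color(graph, 3) == [0, 1, 2]
#     assert color(graph, 2) == []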
| 343
| 0
|
"""simple docstring"""
class TrieNode:
    def __init__( self ):
        '''simple docstring'''
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self , words: list[str] ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self , word: str ):
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self , word: str ):
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self , word: str ):
        '''simple docstring'''
        def _delete(curr: TrieNode , word: str , index: int ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
def print_words(node: TrieNode , word: str ):
    '''simple docstring'''
    if node.is_leaf:
        print(word , end=' ' )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie():
    '''simple docstring'''
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find('banana' )
    assert not root.find('bandanas' )
    assert not root.find('apps' )
    assert root.find('apple' )
    assert root.find('all' )
    root.delete('all' )
    assert not root.find('all' )
    root.delete('banana' )
    assert not root.find('banana' )
    assert root.find('bananas' )
    return True
def print_results(msg: str , passes: bool ):
    '''simple docstring'''
    print(str(msg ) , 'works!' if passes else 'doesn\'t work :(' )
def pytests():
    '''simple docstring'''
    assert test_trie()
def main():
    '''simple docstring'''
    print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
| 186
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname: str , version: str , pattern: str ):
    '''simple docstring'''
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
def update_version_in_examples(version: str ):
    '''simple docstring'''
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update(version: str , patch: bool=False ):
    '''simple docstring'''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    '''simple docstring'''
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
        index += 1
    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def get_version():
    '''simple docstring'''
    with open(REPLACE_FILES['init'] , 'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch: bool=False ):
    '''simple docstring'''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
def post_release_work():
    '''simple docstring'''
    current_version = get_version()
    dev_version = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_UpperCamelCase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 186
| 1
|
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        images = kwargs.pop('images' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json( self , tokens , is_inner_value=False , added_vocab=None ):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(R'<s_(.*?)>' , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(RF'</s_{key}>' , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , '' )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'<sep/>' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCamelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCamelCase , )
return self.image_processor
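
# Usage sketch for `token2json` above (hedged: this assumes the class is a
# Donut-style processor; the public checkpoint name is only an illustration):
#
#     from transformers import DonutProcessor
#
#     processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
#     processor.token2json("<s_menu><s_nm>Americano</s_nm><s_cnt>1</s_cnt></s_menu>")
#     # -> {"menu": {"nm": "Americano", "cnt": "1"}}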
"""simple docstring"""
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class a ( a_ ):
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
super().__init__()
self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
@torch.no_grad()
def __call__( self , _lowerCamelCase = 1 , _lowerCamelCase = None , _lowerCamelCase = 5_0 , _lowerCamelCase = "pil" , _lowerCamelCase = True , **_lowerCamelCase , ):
lowercase = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_lowerCamelCase , )
lowercase = image.to(self.device )
# set step values
self.scheduler.set_timesteps(_lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase = self.unet(_lowerCamelCase , _lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
lowercase = (image / 2 + 0.5).clamp(0 , 1 )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=_lowerCamelCase ), "This is a local test"
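

def _custom_pipeline_demo():
    # Usage sketch (assumption: any small unconditional UNet/scheduler pair
    # works here; the untrained toy UNet below is purely illustrative).
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    scheduler = DDPMScheduler()
    pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
    output, message = pipe(batch_size=1, num_inference_steps=2)
    assert message == "This is a local test"
    return output.images[0]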
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
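

def _esm_fill_mask_demo():
    # End-to-end masked-LM sketch (assumption: network access to the same
    # facebook/esm2_t6_8M_UR50D checkpoint the slow tests above use; the ESM
    # tokenizer is whitespace-based, hence the space-separated residues).
    import tensorflow as tf

    from transformers import AutoTokenizer, TFEsmForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

    inputs = tokenizer("M Q I F V K <mask> L T G K", return_tensors="tf")
    logits = model(**inputs).logits

    masked_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0])
    predicted_id = int(tf.argmax(logits[0, masked_index]))
    return tokenizer.decode([predicted_id])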
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
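

def _key_exchange_demo() -> bool:
    # Usage sketch: two parties using the same MODP group derive identical
    # shared secrets; nothing here depends on anything outside this module.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)

    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    return shared_a == shared_b  # True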
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON config
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
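
# Example invocation (script name and all paths are hypothetical placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/t5/model.ckpt \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5_pytorch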
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # If the backend pre-tokenizer disagrees on `add_prefix_space`,
        # rebuild it with the requested setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
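

def _gpt2_fast_tokenizer_demo():
    # Usage sketch, assuming the public `gpt2` checkpoint is reachable.
    from transformers import GPT2TokenizerFast

    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
    ids = tokenizer("Hello world")["input_ids"]
    return tokenizer.decode(ids)  # round-trips to "Hello world"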
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
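
# Example invocation (script name and paths are hypothetical placeholders):
#   python convert_xglm_original_ckpt_to_trfms.py /tmp/xglm/model.pt /tmp/xglm_hf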
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
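

def _distilbert_config_demo():
    # Sketch: `attribute_map` lets the generic HF names resolve to
    # DistilBERT's own parameter names; nothing external is needed here.
    config = DistilBertConfig(n_layers=2, n_heads=4, dim=128, hidden_dim=512)
    return config.hidden_size, config.num_hidden_layers  # (128, 2)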
import argparse
import os
import shutil
from pathlib import Path

import onnx
import torch
from packaging import version
from torch.onnx import export

from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet is > 2GB, so the weights need to be split
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
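
# Example invocation (model id and output directory are placeholders):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./stable_diffusion_onnx \
#       --opset 14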
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


# We verify the converted model on an image from the COCO validation set
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
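
# Example invocation (checkpoint and output paths are hypothetical placeholders):
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /tmp/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path /tmp/yolos_hf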
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue

    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the partitions of `number_to_partition` into sums of primes,
    each partition encoded as the product of its primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
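

# Worked example: 10 has five prime partitions (2+2+2+2+2, 2+2+3+3, 2+3+5,
# 3+7 and 5+5). Each partition is encoded as the product of its primes
# (32, 36, 30, 21 and 25), so len(partition(10)) == 5; unique factorization
# guarantees distinct partitions map to distinct products.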
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than `number_unique_partitions`
    prime partitions, or None if no such value exists below NUM_PRIMES."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")