| code: string (82–54.1k chars) | code_codestyle: int64 (0–699) | style_context: string (111–35.6k chars) | style_context_codestyle: int64 (0–699) | label: int64 (0–1) |
|---|---|---|---|---|
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance the key, wrapping around when it is exhausted.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letter symbols pass through unchanged.
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
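
# --- Added example -------------------------------------------------------
# A quick round-trip check of the cipher above (illustrative; the expected
# strings were worked out by hand from the Vigenere tableau with key "KEY").
def _demo_vigenere() -> None:
    assert encrypt_message("KEY", "HELLO") == "RIJVS"
    assert decrypt_message("KEY", "RIJVS") == "HELLO"
    # Non-letters pass through unchanged and do not advance the key.
    assert encrypt_message("KEY", "HELLO, WORLD!") == "RIJVS, UYVJN!"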
| code_codestyle: 20 |
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# Silence advisory warnings from transformers during the test run.
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
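
# --- Added example -------------------------------------------------------
# A minimal single-process smoke test of the helpers above (an illustrative
# sketch, not part of the original script): it exercises `test_torch_metrics`
# on CPU without a distributed launcher. The full suite is normally launched
# across processes, e.g. via `accelerate launch`.
def smoke_test(num_samples: int = 32) -> None:
    accelerator = Accelerator()
    test_torch_metrics(accelerator, num_samples)
    accelerator.state._reset_state()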
| style_context_codestyle: 84 | label: 0 |
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
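
# --- Added example -------------------------------------------------------
# A minimal sketch of using the pipeline outside the test harness (assumes
# network access to the "google/ddpm-cifar10-32" checkpoint used in the
# integration test above; treat it as illustrative).
def _generate_sample() -> None:
    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    image = pipe(num_inference_steps=50).images[0]
    image.save("ddim_generated_sample.png")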
| code_codestyle: 21 |
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
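
# --- Added example -------------------------------------------------------
# An illustrative sketch of calling the processor (the checkpoint name is an
# assumption; any OWL-ViT checkpoint using this processor class would do).
def _demo_owlvit_processor():
    import numpy as np
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    # Nested text queries are padded to the longest query list in the batch.
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
    return inputs["input_ids"].shape, inputs["pixel_values"].shape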
| style_context_codestyle: 84 | label: 0 |
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack."""

    def test_sorted(self):
        """calc_profit(profit, weight, max_weight) should match the expected answer."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """A negative max_weight raises ValueError."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """A negative weight raises ValueError."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """A negative profit raises ValueError."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """A zero max_weight raises ValueError."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Unequal profit/weight list lengths raise an error."""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
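
# --- Added example -------------------------------------------------------
# The `knapsack.greedy_knapsack` module under test is not included in this
# snippet. A minimal sketch of a `calc_profit` consistent with the assertions
# above (a fractional/greedy knapsack; illustrative, not the actual module):
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Greedily take items by profit/weight ratio, taking a fractional share
    # of the item that would overflow the capacity.
    limit = 0.0
    gain = 0.0
    for p, w in sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True):
        if limit + w <= max_weight:
            limit += w
            gain += p
        else:
            gain += (max_weight - limit) / w * p
            break
    return gain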
| code_codestyle: 22 |
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| style_context_codestyle: 84 | label: 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
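
# --- Added example -------------------------------------------------------
# The `_LazyModule` indirection above defers the heavy `torch` import until an
# attribute is first accessed. A stripped-down sketch of the idea (this
# `LazyModule` is hypothetical, not the transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    """Load submodules on first attribute access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        if name not in self._class_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {name}")
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value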
| code_codestyle: 23 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| style_context_codestyle: 84 | label: 0 |
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 24 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 84 | label: 0 |
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values"
        )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
| code_codestyle: 25 |
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: returns the list of primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sums the semidivisible numbers up to the given limit (Project Euler 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
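
# --- Added example -------------------------------------------------------
# A quick sanity check of `prime_sieve` (fast to run; `solution()` at the
# default limit takes far longer):
def _test_prime_sieve() -> None:
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]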
| style_context_codestyle: 84 | label: 0 |
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """
    Performs the BB84 protocol using a key made of `key_len` bits.
    """
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
| code_codestyle: 26 |
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo and raise an error if the two halves define different objects."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
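
# --- Added example -------------------------------------------------------
# What `find_backend` extracts from init lines (illustrative input strings,
# not taken from a real init file):
def _demo_find_backend() -> None:
    assert find_backend("    if not is_torch_available():") == "torch"
    assert find_backend("    if not is_tf_available():") == "tf"
    # Lines that are not backend guards yield None.
    assert find_backend("import os") is None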
| style_context_codestyle: 84 | label: 0 |
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8  # flag to signal that an arrow key was pressed

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| code_codestyle: 27 |
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Linked-list based stack supporting push (to top), pop (from top) and peek."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
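
# --- Added example -------------------------------------------------------
# Basic usage of the linked-list stack above (illustrative):
def _demo_stack() -> None:
    stack: LinkedStack[int] = LinkedStack()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert str(stack) == "3->2->1"
    assert stack.peek() == 3
    assert stack.pop() == 3
    assert len(stack) == 2
    stack.clear()
    assert stack.is_empty()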
| style_context_codestyle: 84 | label: 0 |
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
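
# Illustrative sketch (not one of the original tests): the reader/writer pair exercised
# above can also be used directly for a simple round trip; `Dataset.from_dict` and the
# parquet classes are the same names imported at the top of this file.
def _example_round_trip(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
    ParquetDatasetWriter(ds, tmp_path / "example.parquet").write()
    reloaded = ParquetDatasetReader(str(tmp_path / "example.parquet")).read()
    assert reloaded.column_names == ds.column_names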
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the "
            "universe and 2) the passage of time and the length of objects can vary depending on the observer's "
            "frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, "
            'is known as the "princi'
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
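
# Illustrative note (not part of the test suite): the RoPE-scaling behaviour checked in
# `test_model_rope_scaling` is driven entirely by the `rope_scaling` config field, e.g.:
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#     model = LlamaModel(config)
#
# "linear" rescales positions for every input, while "dynamic" only changes the
# embeddings once an input exceeds `max_position_embeddings`.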
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = 0
lowerCamelCase_ = len(lowerCAmelCase__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowerCamelCase_ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCAmelCase__ ):
return None
lowerCamelCase_ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
lowerCamelCase_ = left
lowerCamelCase_ = point
elif point > right:
lowerCamelCase_ = right
lowerCamelCase_ = point
else:
if item < current_item:
lowerCamelCase_ = point - 1
else:
lowerCamelCase_ = point + 1
return None
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowerCamelCase_ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCAmelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
elif point > right:
return interpolation_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,point - 1 )
else:
return interpolation_search_by_recursion(
lowerCAmelCase__ ,lowerCAmelCase__ ,point + 1 ,lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
if collection != sorted(lowerCAmelCase__ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
A_ = 0
if debug == 1:
A_ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
A_ = 67
A_ = interpolation_search(collection, target)
if result is not None:
print(f"{target} found at positions: {result}")
else:
print("""Not found""")
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
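
# Illustrative sketch (not part of this module): in the dataset tests this manager is
# swapped in for the real DownloadManager so builders resolve dummy files instead of
# hitting the network. The dataset name and version below are hypothetical:
#
#     dl_manager = MockDownloadManager(dataset_name="squad", config=None, version="1.0.0")
#     paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})
#     # -> {"train": "<path to dummy_data dir>/train.json"}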
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
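
# Typical invocation (sketch; as the warning above notes, the command is deprecated in
# favour of `transformers-cli add-new-model-like`):
#
#     transformers-cli add-new-model
#
# This runs the cookiecutter questionnaire and then scaffolds the model, test, and doc
# files that the `run` method above moves into place.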
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests with ftfy and spacy installed, which changes the pre-tokenization path."""

    pass
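
# Illustrative note on the toy vocabulary above: with merges ["l o", "lo w", "e r</w>"],
# "lower" is first split into characters ("l", "o", "w", "e", "r</w>") and then merged
# step by step: "l o" -> "lo", "lo w" -> "low", "e r</w>" -> "er</w>", which yields the
# ["low", "er</w>"] pair that `test_full_tokenizer` asserts.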
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor. It extracts the nodes (text) and the
    corresponding xpath of each node from one or more HTML strings, using BeautifulSoup.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Input type checking for clearer error
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
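
# Illustrative usage sketch (assumes bs4 is installed):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     print(encoding["nodes"])   # [['Hello world']]
#     print(encoding["xpaths"])  # [['/html/body/p']]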
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
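
# Illustrative sketch: with the custom checker installed above, a doctest line can opt
# out of output comparison entirely (the function name below is hypothetical):
#
#     >>> some_function_with_nondeterministic_repr()  # doctest: +IGNORE_RESULT
#     <output that will not be compared>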
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir("fixtures")
UpperCAmelCase_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCAmelCase_ = get_tests_dir("fixtures/dummy-config.json")
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = 0
def UpperCamelCase( self ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase ).to_dict()
config_dict.pop('''feature_extractor_type''' )
_UpperCAmelCase = WavaVecaFeatureExtractor(**_UpperCamelCase )
# save in new folder
model_config.save_pretrained(_UpperCamelCase )
config.save_pretrained(_UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
# make sure private variable is not incorrectly saved
_UpperCAmelCase = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
with self.assertRaisesRegex(
_UpperCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def UpperCamelCase( self ):
with self.assertRaisesRegex(
_UpperCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase , revision='''aaaaaa''' )
def UpperCamelCase( self ):
with self.assertRaisesRegex(
_UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCamelCase( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_UpperCamelCase ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCamelCase ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def UpperCamelCase( self ):
try:
AutoConfig.register('''custom''' , _UpperCamelCase )
AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase( self ):
        class NewFeatureExtractor(CustomFeatureExtractor ):
            is_local = True
try:
AutoConfig.register('''custom''' , _UpperCamelCase )
AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase )
# If remote code is not set, the default is to use local
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(_UpperCamelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] | 32 |
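# Sketch of the registration pattern the tests above exercise (the fixture classes are
# imported at the top of the file; the checkpoint path below is illustrative only):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   extractor = AutoFeatureExtractor.from_pretrained(path_to_saved_custom_checkpoint)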
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module ):
    '''Projected adaptive log-softmax head (the output layer used by Transformer-XL).'''
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self , hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
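# Minimal usage sketch for the head above. The vocabulary size, cutoffs, and tensor shapes
# are illustrative assumptions, not values taken from any original model configuration.
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000 , d_embed=32 , d_proj=32 , cutoffs=[100, 500] )
    hidden = torch.randn(4 , 8 , 32 )  # (batch, seq_len, d_proj) hidden states
    labels = (torch.arange(4 * 8 ).view(4 , 8 ) * 31) % 1000  # deterministic next-token targets
    nll = crit(hidden , labels )  # per-position negative log-likelihoods (inputs are shifted internally)
    print(nll.shape )  # torch.Size([28]) == batch * (seq_len - 1)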
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
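# Note on the layout above (the standard transformers lazy-import pattern): under
# TYPE_CHECKING the explicit imports give IDEs and type checkers real symbols, while at
# runtime the `sys.modules` swap defers importing `modeling_megatron_bert` (and torch)
# until an attribute such as `MegatronBertModel` is first accessed.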
| 33 |
from __future__ import annotations
class Matrix:
    '''Real-valued matrix supporting arithmetic, determinants, and inverses.'''
    def __init__( self , rows ):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ):
        return len(self.rows )
    @property
    def num_columns( self ):
        return len(self.rows[0] )
    @property
    def order( self ):
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self ):
        return bool(self.determinant() )
    def get_minor( self , row , column ):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self , row , column ):
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)
def __repr__( self ):
return str(self.rows )
def __str__( self ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(snake_case ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
    def add_row( self , row , position = None ):
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column , position = None ):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other ):
        if not isinstance(other , Matrix ):
return NotImplemented
return self.rows == other.rows
    def __ne__( self , other ):
return not self == other
def __neg__( self ):
return self * -1
    def __add__( self , other ):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self , other ):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self , other ):
        if isinstance(other , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
                    [Matrix.dot_product(row , column ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
    def __pow__( self , other ):
        if not isinstance(other , int ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls , row , column ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
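# Quick usage sketch (values chosen for illustration):
if __name__ == "__main__":
    m = Matrix([[1, 2], [3, 4]] )
    print(m.determinant() )  # 1 * 4 - 2 * 3 == -2
    print(m * m.identity() == m )  # multiplying by the identity leaves the matrix unchanged: True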
| 84 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset ):
    def __init__( self , data ):
        self.data = data
    def __iter__( self ):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True ):
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator ,dataset_size ,batch_size ,iterable = False ):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset ,batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes(accelerator ,dataset_size ,batch_size ,process_0_expected_batch_sizes ,process_1_expected_batch_sizes ,):
    dl = create_dataloader(accelerator=accelerator ,dataset_size=dataset_size ,batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator ,dataset_size=3 ,batch_size=1 ,process_0_expected_batch_sizes=[1, 1] ,process_1_expected_batch_sizes=[1, 1] ,)
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator ,dataset_size=7 ,batch_size=2 ,process_0_expected_batch_sizes=[2, 2] ,process_1_expected_batch_sizes=[2, 2] ,)
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator ,dataset_size=3 ,batch_size=1 ,process_0_expected_batch_sizes=[1, 1] ,process_1_expected_batch_sizes=[1] ,)
    verify_dataloader_batch_sizes(
        accelerator ,dataset_size=7 ,batch_size=2 ,process_0_expected_batch_sizes=[2, 2] ,process_1_expected_batch_sizes=[2, 1] ,)
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False )
    model = torch.nn.Linear(1 ,1 )
    ddp_model = accelerator.prepare(model )
    dl = create_dataloader(accelerator ,dataset_size=3 ,batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator ):
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
    assert issubclass(w[-1].category ,UserWarning )
    assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 ,1 )
    ddp_model = accelerator.prepare(model )
    train_dl = create_dataloader(accelerator ,dataset_size=3 ,batch_size=1 )
    valid_dl = create_dataloader(accelerator ,dataset_size=3 ,batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] ,even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 ,1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator ,dataset_size=3 ,batch_size=1 ,iterable=True )
    batch_dl = create_dataloader(accelerator ,dataset_size=3 ,batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings('''ignore''' )
        try:
            with accelerator.join_uneven_inputs([ddp_model] ,even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 ,1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator ,dataset_size=3 ,batch_size=1 ,iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] ,even_batches=False ):
            pass
    assert issubclass(w[-1].category ,UserWarning )
    assert "only supported for map-style datasets" in str(w[-1].message )
def main():
    accelerator = create_accelerator()
    accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
    test_default_ensures_even_batch_sizes()
    accelerator.print('''Run tests with even_batches disabled''' )
    test_can_disable_even_batches()
    accelerator.print('''Test joining uneven inputs''' )
    test_can_join_uneven_inputs()
    accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
    test_join_can_override_even_batches()
    accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print('''Test join with non DDP distributed raises warning''' )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main() | 34 |
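# Usage note: the script asserts accelerator.num_processes == 2, so it must be started
# under a two-process launcher; with accelerate that would look roughly like this
# (the script filename is illustrative):
#
#   accelerate launch --num_processes 2 test_distributed_data_loop.py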
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline ):
    '''Decoder pipeline for text-to-image generation with Kandinsky 2.2 (unet + movq).'''
    def __init__( self , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
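# Note on the guidance step inside __call__ above: it is standard classifier-free guidance,
#   noise_pred = noise_uncond + guidance_scale * (noise_text - noise_uncond)
# so guidance_scale > 1.0 amplifies the image-embedding-conditioned direction; the
# EXAMPLE_DOC_STRING at the top of the file shows the full prior + decoder workflow.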
| 84 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ) -> int:
    return EnvironmentCommand()
def download_command_factory(args ) -> int:
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand(BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand(parser: ArgumentParser ):
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ):
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''' ) is not None:
            import safetensors
            safetensors_version = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                '''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else f"""\t{accelerate_config}"""
            )
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('''GPU''' ) )
        flax_version = '''not installed'''
        jax_version = '''not installed'''
        jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': f"""{safetensors_version}""",
            '''Accelerate version''': f"""{accelerate_version}""",
            '''Accelerate config''': f"""{accelerate_config_str}""",
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Tensorflow version (GPU?)''': f"""{tf_version} ({tf_cuda_available})""",
            '''Flax version (CPU?/GPU?/TPU?)''': f"""{flax_version} ({jax_backend})""",
            '''Jax version''': f"""{jax_version}""",
            '''JaxLib version''': f"""{jaxlib_version}""",
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info
@staticmethod
    def format_dict(d ):
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 |
def decimal_isolate(number , digit_amount ):
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
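# Behaviour sketch: decimal_isolate(35.345, 1) -> 0.3 and decimal_isolate(-14.789, 3) -> -0.789;
# the result comes from `round` applied to the fractional part, so ordinary binary
# floating-point rounding applies.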
| 84 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def snake_case_ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB ,keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = """<pad>"""
snake_case : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-1] ,"""<mask>""" )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,1002 )
def snake_case_ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1002 )
def snake_case_ ( self ):
'''simple docstring'''
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB ,keep_accents=True )
snake_case : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
snake_case : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
snake_case : str = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
snake_case : List[str] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def snake_case_ ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Dict = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
snake_case : Dict = tempfile.mkdtemp()
snake_case : str = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : Any = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case : str = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
snake_case : str = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ ,legacy_format=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
snake_case : int = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : List[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ ,legacy_format=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case : List[str] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@cached_property
    def big_tokenizer( self ):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def snake_case_ ( self ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB ,f.name )
            tokenizer = XLMRobertaTokenizer(f.name ,keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def snake_case_ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : str = self.get_rust_tokenizer()
snake_case : Optional[int] = """I was born in 92000, and this is falsé."""
snake_case : Any = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = self.get_rust_tokenizer()
snake_case : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = """Hello World!"""
snake_case : Any = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
snake_case : Union[str, Any] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
@slow
def snake_case_ ( self ):
'''simple docstring'''
# fmt: off
snake_case : List[Any] = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ ,model_name="""xlm-roberta-base""" ,revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" ,)
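# Round-trip sketch of what the slow integration tests above check (token ids taken
# directly from the test expectations):
#
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok.encode("Hello World!")  # -> [0, 35378, 6661, 38, 2], i.e. <s> ... </s> with fairseq offsets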
| 36 |
from __future__ import annotations
def is_palindrome(n ):
    n = str(n )
    return n == n[::-1]
def solution(limit = 100_0000 ):
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
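# This is Project Euler problem 36: sum every number below the limit that is palindromic
# in both base 10 and base 2, e.g. 585 == 0b1001001001 reads the same both ways.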
| 84 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase : Optional[int] = get_tests_dir("""fixtures""")
UpperCamelCase : List[Any] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
UpperCamelCase : List[str] = get_tests_dir("""fixtures/dummy-config.json""")
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : str ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _UpperCamelCase( self : Dict ):
a__ : Any = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : str ):
a__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Optional[int] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
a__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("feature_extractor_type" )
a__ : Tuple = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
a__ : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
a__ : Dict = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
a__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Any ):
with self.assertRaisesRegex(
lowerCamelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
a__ : Tuple = AutoFeatureExtractor.from_pretrained("bert-base" )
def _UpperCamelCase( self : List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
a__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="aaaaaa" )
def _UpperCamelCase( self : Union[str, Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
a__ : List[Any] = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def _UpperCamelCase( self : Union[str, Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
a__ : str = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
a__ : Tuple = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=lowerCamelCase__ )
a__ : List[str] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
a__ : Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def _UpperCamelCase( self : Optional[Any] ):
try:
AutoConfig.register("custom" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
a__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
a__ : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _UpperCamelCase( self : int ):
        class NewFeatureExtractor(CustomFeatureExtractor ):
            is_local = True
try:
AutoConfig.register("custom" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
a__ : Dict = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
a__ : List[str] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
a__ : int = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(lowerCamelCase__ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
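# Summary of the trust_remote_code behaviour exercised above: once a local class is
# registered for CustomConfig, trust_remote_code=False (or unset) resolves to the local
# NewFeatureExtractor (is_local is True), while trust_remote_code=True fetches the Hub
# version of NewFeatureExtractor, which lacks the is_local attribute.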
| 37 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[Any] = """conditional_detr"""
_UpperCamelCase : Any = ["""past_key_values"""]
_UpperCamelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case=True , snake_case=None , snake_case=3 , snake_case=300 , snake_case=6 , snake_case=2048 , snake_case=8 , snake_case=6 , snake_case=2048 , snake_case=8 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case="relu" , snake_case=256 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1.0 , snake_case=False , snake_case="sine" , snake_case="resnet50" , snake_case=True , snake_case=False , snake_case=2 , snake_case=5 , snake_case=2 , snake_case=1 , snake_case=1 , snake_case=2 , snake_case=5 , snake_case=2 , snake_case=0.25 , **snake_case , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowercase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case , snake_case ):
lowercase = backbone_config.get('model_type' )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(snake_case )
lowercase = use_timm_backbone
lowercase = backbone_config
lowercase = num_channels
lowercase = num_queries
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = encoder_layers
lowercase = auxiliary_loss
lowercase = position_embedding_type
lowercase = backbone
lowercase = use_pretrained_backbone
lowercase = dilation
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = cls_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = focal_alpha
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.d_model
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 12
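# Hedged usage sketch for the config above. In the public transformers API these
# classes are ConditionalDetrConfig / ConditionalDetrModel (names are obfuscated here).
from transformers import ConditionalDetrConfig, ConditionalDetrModel

detr_config = ConditionalDetrConfig(num_queries=300, d_model=256)  # defaults made explicit
detr_model = ConditionalDetrModel(detr_config)  # randomly initialized weights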
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LEDTokenizer
lowerCamelCase__ = LEDTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
snake_case__ : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def __UpperCamelCase ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
snake_case__ : Optional[int] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Any = tokenizer(__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
snake_case__ : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Dict = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIn("""input_ids""" , __SCREAMING_SNAKE_CASE )
self.assertIn("""attention_mask""" , __SCREAMING_SNAKE_CASE )
self.assertNotIn("""labels""" , __SCREAMING_SNAKE_CASE )
self.assertNotIn("""decoder_attention_mask""" , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Union[str, Any] = tokenizer(text_target=__SCREAMING_SNAKE_CASE , max_length=3_2 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
@require_torch
def __UpperCamelCase ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : List[str] = tokenizer(
["""I am a small frog""" * 1_0_2_4, """I am a small frog"""] , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : str = ["""A long paragraph for summarization."""]
snake_case__ : List[Any] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Any = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = tokenizer(text_target=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : List[Any] = inputs["""input_ids"""]
snake_case__ : int = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __UpperCamelCase ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Dict = ["""Summary of the text.""", """Another summary."""]
snake_case__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
snake_case__ : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = [[0] * len(__SCREAMING_SNAKE_CASE ) for x in encoded_output["""input_ids"""]]
snake_case__ : Optional[int] = tokenizer.pad(__SCREAMING_SNAKE_CASE )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : str = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = """A, <mask> AllenNLP sentence."""
snake_case__ : List[str] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a RoBERTa tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
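# Hedged usage sketch; the checkpoint name is an assumption, and any BridgeTower
# checkpoint with a paired image processor and RoBERTa tokenizer should behave the same.
from PIL import Image

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
inputs = processor(images=Image.open("cat.png"), text="a cat", return_tensors="pt")  # placeholder image path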
def topological_sort(graph):
    """Kahn's algorithm: repeatedly emit vertices whose in-degree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
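# Design note: queue.pop(0) above shifts the whole list, so the loop degrades to
# O(V^2 + E) on dense inputs. A hedged variant with collections.deque keeps Kahn's
# algorithm O(V + E) and returns the order instead of printing it:
from collections import deque


def topological_sort_deque(adjacency):
    indegree = {vertex: 0 for vertex in adjacency}
    for targets in adjacency.values():
        for target in targets:
            indegree[target] += 1
    queue = deque(vertex for vertex, degree in indegree.items() if degree == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for nxt in adjacency[vertex]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    return order if len(order) == len(adjacency) else None  # None signals a cycle


assert topological_sort_deque(graph) == [0, 1, 2, 3, 4, 5]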
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character count is odd."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
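# Design note: the _LazyModule indirection keeps importing the package cheap; the
# heavy torch / flax imports above only run once a symbol is actually touched.
# A hedged illustration:
from transformers.models import gpt_neo  # no torch import triggered yet

causal_lm_cls = gpt_neo.GPTNeoForCausalLM  # first attribute access performs the real import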
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k completions for a single <mask> token in `masked_input`."""
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")  # strip the sentencepiece word marker
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
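# The same behaviour is available through the pipeline API; a hedged one-liner
# alternative to the manual fill_mask above (`top_k` is the argument name in
# recent transformers releases):
from transformers import pipeline

camembert_fill_mask = pipeline("fill-mask", model="camembert-base", tokenizer="camembert-base")
print(camembert_fill_mask("Le camembert est <mask> :)", top_k=3))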
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : jnp.ndarray
_UpperCamelCase : jnp.ndarray
class A_ ( nn.Module ):
'''simple docstring'''
_UpperCamelCase : int
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
_UpperCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase = self.block_out_channels[i]
lowercase = self.block_out_channels[i + 1]
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = blocks
lowercase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case ):
lowercase = self.conv_in(snake_case )
lowercase = nn.silu(snake_case )
for block in self.blocks:
lowercase = block(snake_case )
lowercase = nn.silu(snake_case )
lowercase = self.conv_out(snake_case )
return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = 32
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1280
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.floataa
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : str = "rgb"
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase = jnp.ones((1,) , dtype=jnp.intaa )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase , lowercase = jax.random.split(snake_case )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.block_out_channels
lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase = self.num_attention_heads or self.attention_head_dim
# input
lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase = self.only_cross_attention
if isinstance(snake_case , snake_case ):
lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case , snake_case ):
lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase = []
lowercase = []
lowercase = block_out_channels[0]
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase = output_channel
lowercase = block_out_channels[i]
lowercase = i == len(snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase = FlaxCrossAttnDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase = FlaxDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case )
for _ in range(self.layers_per_block ):
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
if not is_final_block:
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
lowercase = down_blocks
lowercase = controlnet_down_blocks
# mid
lowercase = block_out_channels[-1]
lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase = jnp.flip(snake_case , axis=1 )
# 1. time
if not isinstance(snake_case , jnp.ndarray ):
lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase = timesteps.astype(dtype=jnp.floataa )
lowercase = jnp.expand_dims(snake_case , 0 )
lowercase = self.time_proj(snake_case )
lowercase = self.time_embedding(snake_case )
# 2. pre-process
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.conv_in(snake_case )
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.controlnet_cond_embedding(snake_case )
sample += controlnet_cond
# 3. down
lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case , snake_case ):
lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
else:
lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
# 5. contronet blocks
lowercase = ()
for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
lowercase = controlnet_block(snake_case )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase = controlnet_down_block_res_samples
lowercase = self.controlnet_mid_block(snake_case )
# 6. scaling
lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
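# Hedged initialization sketch for the module above. In the public diffusers API the
# class is FlaxControlNetModel; calling init_weights with a PRNG key mirrors the
# dummy-input init method defined above (treat both names as assumptions here).
import jax
from diffusers import FlaxControlNetModel

controlnet = FlaxControlNetModel()  # defaults mirror the dataclass fields above
params = controlnet.init_weights(jax.random.PRNGKey(0))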
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
print('Loading config file...' )
def flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase="" ,__UpperCamelCase="." ):
lowerCamelCase_ = []
for k, v in d.items():
lowerCamelCase_ = parent_key + sep + k if parent_key else k
if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() )
else:
items.append((new_key, v) )
return dict(__UpperCamelCase )
lowerCamelCase_ = argparse.Namespace()
with open(__UpperCamelCase ,'r' ) as yaml_file:
try:
lowerCamelCase_ = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader )
lowerCamelCase_ = flatten_yaml_as_dict(__UpperCamelCase )
for k, v in flat_cfg.items():
setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(__UpperCamelCase ,str(__UpperCamelCase ) ) )
return config
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = MobileViTVaConfig()
lowerCamelCase_ = False
# dataset
if task_name.startswith('imagenet1k_' ):
lowerCamelCase_ = 10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowerCamelCase_ = 3_84
else:
lowerCamelCase_ = 2_56
lowerCamelCase_ = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
lowerCamelCase_ = 2_10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowerCamelCase_ = 3_84
else:
lowerCamelCase_ = 2_56
lowerCamelCase_ = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
lowerCamelCase_ = 1_51
lowerCamelCase_ = 5_12
lowerCamelCase_ = 'ade20k-id2label.json'
lowerCamelCase_ = True
elif task_name.startswith('voc_' ):
lowerCamelCase_ = 21
lowerCamelCase_ = 5_12
lowerCamelCase_ = 'pascal-voc-id2label.json'
lowerCamelCase_ = True
# orig_config
lowerCamelCase_ = load_orig_config_file(__UpperCamelCase )
assert getattr(__UpperCamelCase ,'model.classification.name' ,-1 ) == "mobilevit_v2", "Invalid model"
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.mitv2.width_multiplier' ,1.0 )
assert (
getattr(__UpperCamelCase ,'model.classification.mitv2.attn_norm_layer' ,-1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.activation.name' ,'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.output_stride' ,16 )
if "_deeplabv3" in task_name:
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_rates' ,[12, 24, 36] )
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_out_channels' ,5_12 )
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_dropout' ,0.1 )
# id2label
lowerCamelCase_ = 'huggingface/label-files'
lowerCamelCase_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) )
lowerCamelCase_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]:
lowerCamelCase_ = dct.pop(__UpperCamelCase )
lowerCamelCase_ = val
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ) -> Dict:
if base_model:
lowerCamelCase_ = ''
else:
lowerCamelCase_ = 'mobilevitv2.'
lowerCamelCase_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCamelCase_ = k[8:]
else:
lowerCamelCase_ = k
if ".block." in k:
lowerCamelCase_ = k_new.replace('.block.' ,'.' )
if ".conv." in k:
lowerCamelCase_ = k_new.replace('.conv.' ,'.convolution.' )
if ".norm." in k:
lowerCamelCase_ = k_new.replace('.norm.' ,'.normalization.' )
if "conv_1." in k:
lowerCamelCase_ = k_new.replace('conv_1.' ,f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowerCamelCase_ = k_new.replace('.exp_1x1.' ,'.expand_1x1.' )
if ".red_1x1." in k:
lowerCamelCase_ = k_new.replace('.red_1x1.' ,'.reduce_1x1.' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.1.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowerCamelCase_ = [0, 1]
elif i == 4:
lowerCamelCase_ = [0, 1, 2, 3]
elif i == 5:
lowerCamelCase_ = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
lowerCamelCase_ = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' ,f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowerCamelCase_ = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.conv_proj.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowerCamelCase_ = k_new.replace('pre_norm_attn.0.' ,'layernorm_before.' )
if "pre_norm_attn.1." in k:
lowerCamelCase_ = k_new.replace('pre_norm_attn.1.' ,'attention.' )
if "pre_norm_ffn.0." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.0.' ,'layernorm_after.' )
if "pre_norm_ffn.1." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.1.' ,'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.3.' ,'ffn.conv2.' )
if "classifier.1." in k:
lowerCamelCase_ = k_new.replace('classifier.1.' ,'classifier.' )
if "seg_head." in k:
lowerCamelCase_ = k_new.replace('seg_head.' ,'segmentation_head.' )
if ".aspp_layer." in k:
lowerCamelCase_ = k_new.replace('.aspp_layer.' ,'.' )
if ".aspp_pool." in k:
lowerCamelCase_ = k_new.replace('.aspp_pool.' ,'.' )
rename_keys.append((k, k_new) )
return rename_keys
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[Any]:
lowerCamelCase_ = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(__UpperCamelCase )
for k in keys_to_ignore:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> Optional[Any]:
lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCamelCase_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase )
# load original state_dict
lowerCamelCase_ = torch.load(__UpperCamelCase ,map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
lowerCamelCase_ = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval()
lowerCamelCase_ = False
else:
lowerCamelCase_ = MobileViTVaForImageClassification(__UpperCamelCase ).eval()
lowerCamelCase_ = False
# remove and rename some keys of load the original model
lowerCamelCase_ = checkpoint
remove_unused_keys(__UpperCamelCase )
lowerCamelCase_ = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load modified state_dict
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 )
lowerCamelCase_ = image_processor(images=prepare_img() ,return_tensors='pt' )
lowerCamelCase_ = model(**__UpperCamelCase )
# verify classification model
if task_name.startswith('imagenet' ):
lowerCamelCase_ = outputs.logits
lowerCamelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' ,model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCamelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1e-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
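# Example invocation (the script file name and all paths are placeholders):
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k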
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase = '''true'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=16 ):
set_seed(42 )
lowercase = RegressionModel()
lowercase = deepcopy(__SCREAMING_SNAKE_CASE )
lowercase = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
lowercase = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
model.to(accelerator.device )
lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return model, ddp_model, dataloader
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowercase = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
lowercase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
with accelerator.main_process_first():
lowercase = dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowercase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__SCREAMING_SNAKE_CASE ):
if use_longest:
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=16 )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = Accelerator(dispatch_batches=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE )
lowercase = get_dataloader(__SCREAMING_SNAKE_CASE , not dispatch_batches )
lowercase = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=__SCREAMING_SNAKE_CASE )
lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = []
for batch in dataloader:
lowercase , lowercase = batch.values()
with torch.no_grad():
lowercase = model(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowercase , lowercase = [], []
for logit, targ in logits_and_targets:
logits.append(__SCREAMING_SNAKE_CASE )
targs.append(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = torch.cat(__SCREAMING_SNAKE_CASE ), torch.cat(__SCREAMING_SNAKE_CASE )
return logits, targs
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=16 ):
lowercase , lowercase , lowercase = get_basic_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase , lowercase = generate_predictions(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert (
len(__SCREAMING_SNAKE_CASE ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__SCREAMING_SNAKE_CASE )}'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False ):
lowercase = evaluate.load('glue' , 'mrpc' )
lowercase , lowercase = get_mrpc_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# First do baseline
lowercase , lowercase , lowercase = setup['no']
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
for batch in dataloader:
batch.to(__SCREAMING_SNAKE_CASE )
with torch.inference_mode():
lowercase = model(**__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=batch['labels'] )
lowercase = metric.compute()
# Then do distributed
lowercase , lowercase , lowercase = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowercase = model(**__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits.argmax(dim=-1 )
lowercase = batch['labels']
lowercase , lowercase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
lowercase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def UpperCAmelCase_ ( ):
lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__SCREAMING_SNAKE_CASE , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowercase = Accelerator()
test_torch_metrics(__SCREAMING_SNAKE_CASE , 512 )
accelerator.state._reset_state()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
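# The core pattern the tests above validate, shown in isolation (hedged sketch reusing
# the names from this file): without gather_for_metrics, each process would also report
# the duplicated samples that pad the final uneven batch, skewing the metric.
#
#   for batch in dataloader:
#       with torch.inference_mode():
#           preds = model(**batch).logits.argmax(dim=-1)
#       preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
#       metric.add_batch(predictions=preds, references=refs)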
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowerCAmelCase = logging.getLogger(__name__)
lowerCAmelCase = tf.data.AUTOTUNE
def _a ( ):
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=SCREAMING_SNAKE_CASE , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=SCREAMING_SNAKE_CASE , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=SCREAMING_SNAKE_CASE , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=SCREAMING_SNAKE_CASE , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=SCREAMING_SNAKE_CASE , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=SCREAMING_SNAKE_CASE , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=SCREAMING_SNAKE_CASE , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=SCREAMING_SNAKE_CASE , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=SCREAMING_SNAKE_CASE , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=SCREAMING_SNAKE_CASE , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=SCREAMING_SNAKE_CASE , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=SCREAMING_SNAKE_CASE , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=SCREAMING_SNAKE_CASE , default=5_12 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=SCREAMING_SNAKE_CASE , default=0.15 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=SCREAMING_SNAKE_CASE , help='''Model ID to upload to on the Hugging Face Hub.''' )
lowercase__ = parser.parse_args()
return args
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
if args.tpu_name:
lowercase__ = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase__ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(SCREAMING_SNAKE_CASE )
tf.tpu.experimental.initialize_tpu_system(SCREAMING_SNAKE_CASE )
return tpu
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = 0
for file in file_list:
lowercase__ = file.split('''/''' )[-1]
lowercase__ = re.search(R'''-\d+-(\d+)\.tfrecord''' , SCREAMING_SNAKE_CASE ).group(1 )
lowercase__ = int(SCREAMING_SNAKE_CASE )
num_samples += sample_count
return num_samples
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase__ = count_samples(SCREAMING_SNAKE_CASE )
lowercase__ = tf.data.Dataset.from_tensor_slices(SCREAMING_SNAKE_CASE )
if shuffle:
lowercase__ = dataset.shuffle(len(SCREAMING_SNAKE_CASE ) )
lowercase__ = tf.data.TFRecordDataset(SCREAMING_SNAKE_CASE , num_parallel_reads=SCREAMING_SNAKE_CASE )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase__ = dataset.apply(tf.data.experimental.assert_cardinality(SCREAMING_SNAKE_CASE ) )
lowercase__ = dataset.map(SCREAMING_SNAKE_CASE , num_parallel_calls=SCREAMING_SNAKE_CASE )
if shuffle:
assert shuffle_buffer_size is not None
lowercase__ = dataset.shuffle(args.shuffle_buffer_size )
lowercase__ = dataset.batch(SCREAMING_SNAKE_CASE , drop_remainder=SCREAMING_SNAKE_CASE )
lowercase__ = dataset.map(SCREAMING_SNAKE_CASE , num_parallel_calls=SCREAMING_SNAKE_CASE )
lowercase__ = dataset.prefetch(SCREAMING_SNAKE_CASE )
return dataset
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not args.no_tpu:
lowercase__ = initialize_tpu(SCREAMING_SNAKE_CASE )
lowercase__ = tf.distribute.TPUStrategy(SCREAMING_SNAKE_CASE )
else:
lowercase__ = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
lowercase__ = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase__ = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase__ = tokenizer.vocab_size
lowercase__ = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
lowercase__ = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
lowercase__ = count_samples(SCREAMING_SNAKE_CASE )
lowercase__ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase__ = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase__ = TFAutoModelForMaskedLM.from_config(SCREAMING_SNAKE_CASE )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase__ , lowercase__ = create_optimizer(
num_train_steps=SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=SCREAMING_SNAKE_CASE , metrics=['''accuracy'''] )
def decode_fn(SCREAMING_SNAKE_CASE ):
lowercase__ = {
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase__ = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
def mask_with_collator(SCREAMING_SNAKE_CASE ):
# TF really needs an isin() function
lowercase__ = (
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
lowercase__ , lowercase__ = data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=SCREAMING_SNAKE_CASE , )
return batch
lowercase__ = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase__ = prepare_dataset(
SCREAMING_SNAKE_CASE , decode_fn=SCREAMING_SNAKE_CASE , mask_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase__ = prepare_dataset(
SCREAMING_SNAKE_CASE , decode_fn=SCREAMING_SNAKE_CASE , mask_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , )
lowercase__ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=SCREAMING_SNAKE_CASE ) )
model.fit(
SCREAMING_SNAKE_CASE , validation_data=SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=SCREAMING_SNAKE_CASE , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
lowerCAmelCase = parse_args()
main(args)
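# Example launch on a TPU VM (the script file name and bucket paths are placeholders;
# the flags come from the argparse definitions above):
#   python train_mlm_tpu.py --tpu_name local --bfloat16 \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-checkpoints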
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    r"""
    Constructs an OwlViT processor which wraps an OwlViT image processor and a CLIP tokenizer into a single
    processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
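# A minimal usage sketch (assumed checkpoint name) for the processor above; text
# queries and an image are batched together for open-vocabulary detection:
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
#     )
#     # inputs now holds input_ids, attention_mask and pixel_values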
| 84 | 0 |
"""Quine-McCluskey minimisation: compute prime implicants and select the essential ones."""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings if they differ in at most one bit; otherwise return False.

    >>> compare_string("0010", "0110")
    '0_10'
    >>> compare_string("0110", "1101")
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until nothing merges; the unmerged strings are prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the pair merges into a shorter implicant; mark both as covered
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # A minterm covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily cover the remaining minterms with the implicant covering the most of them.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
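# A small non-interactive sketch (assumed minterms) of the pipeline above: with
# minterms {1, 4, 6} over 3 variables, '100' and '110' merge into the implicant '1_0'.
def _demo() -> None:
    binary = decimal_to_binary(3, [1, 4, 6])  # ['001', '100', '110']
    prime_implicants = check(binary)  # ['001', '1_0']
    chart = prime_implicant_chart(prime_implicants, binary)
    print(selection(chart, prime_implicants))  # ['001', '1_0']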
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 44 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
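# A minimal usage sketch (checkpoint referenced in the maps above) for the fast tokenizer:
#
#     tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tokenizer("sam is a great name. it means 'listener' in hebrew.")["input_ids"]
#     print(tokenizer.decode(ids))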
| 84 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
UpperCamelCase__ :Optional[Any] = import_module("""tasks""" )
try:
UpperCamelCase__ :Tuple = getattr(lowercase__ , model_args.task_type )
UpperCamelCase__ :TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowercase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
UpperCamelCase__ :Dict = token_classification_task.get_labels(data_args.labels )
UpperCamelCase__ :Dict[int, str] = dict(enumerate(lowercase__ ) )
UpperCamelCase__ :Any = len(lowercase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Tuple = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , id2label=lowercase__ , label2id={label: i for i, label in enumerate(lowercase__ )} , cache_dir=model_args.cache_dir , )
UpperCamelCase__ :Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
UpperCamelCase__ :List[str] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCamelCase__ :List[str] = (
TokenClassificationDataset(
token_classification_task=lowercase__ , data_dir=data_args.data_dir , tokenizer=lowercase__ , labels=lowercase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCamelCase__ :Tuple = (
TokenClassificationDataset(
token_classification_task=lowercase__ , data_dir=data_args.data_dir , tokenizer=lowercase__ , labels=lowercase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> Tuple[List[int], List[int]]:
UpperCamelCase__ :List[Any] = np.argmax(lowercase__ , axis=2 )
UpperCamelCase__ , UpperCamelCase__ :int = preds.shape
UpperCamelCase__ :Optional[Any] = [[] for _ in range(lowercase__ )]
UpperCamelCase__ :Optional[int] = [[] for _ in range(lowercase__ )]
for i in range(lowercase__ ):
for j in range(lowercase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(lowercase__ : EvalPrediction ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ :str = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(lowercase__ , lowercase__ ),
"precision": precision_score(lowercase__ , lowercase__ ),
"recall": recall_score(lowercase__ , lowercase__ ),
"f1": fa_score(lowercase__ , lowercase__ ),
}
# Data collator
    UpperCamelCase__ :int = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
UpperCamelCase__ :Dict = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ :Dict = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase__ :List[str] = trainer.evaluate()
UpperCamelCase__ :str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowercase__ , lowercase__ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowercase__ )
# Predict
if training_args.do_predict:
UpperCamelCase__ :Dict = TokenClassificationDataset(
token_classification_task=lowercase__ , data_dir=data_args.data_dir , tokenizer=lowercase__ , labels=lowercase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Any = trainer.predict(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = align_predictions(lowercase__ , lowercase__ )
UpperCamelCase__ :Dict = os.path.join(training_args.output_dir , """test_results.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
for key, value in metrics.items():
logger.info(""" %s = %s""" , lowercase__ , lowercase__ )
writer.write("""%s = %s\n""" % (key, value) )
# Save predictions
UpperCamelCase__ :int = os.path.join(training_args.output_dir , """test_predictions.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f:
token_classification_task.write_predictions_to_file(lowercase__ , lowercase__ , lowercase__ )
return results
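# A tiny sketch (toy tags, not real predictions) of the seqeval metrics computed in
# compute_metrics above; scores are entity-level, not token-level:
def _seqeval_demo():
    from seqeval.metrics import f1_score, precision_score, recall_score

    y_true = [["B-PER", "I-PER", "O"], ["B-LOC", "O"]]
    y_pred = [["B-PER", "I-PER", "O"], ["B-ORG", "O"]]
    print(precision_score(y_true, y_pred), recall_score(y_true, y_pred), f1_score(y_true, y_pred))
    # 0.5 0.5 0.5 — only the PER span matches exactly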
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 45 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
lowercase = model(snake_case , token_type_ids=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTDoubleHeadsModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = self.num_labels
lowercase = OpenAIGPTForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = config_and_inputs
lowercase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
_UpperCamelCase : Optional[Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_UpperCamelCase : Tuple = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_UpperCamelCase : str = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case=False ):
lowercase = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=snake_case , )
lowercase = inputs_dict['labels']
lowercase = inputs_dict['labels']
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=snake_case , )
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = OpenAIGPTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(snake_case )
lowercase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=snake_case ) # the president is
lowercase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowercase = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].tolist() , snake_case )
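# A minimal generation sketch (same public checkpoint as the integration test above),
# mirroring what the slow test checks with greedy decoding:
#
#     from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
#     tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#     input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids, do_sample=False)
#     print(tokenizer.decode(output_ids[0]))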
| 84 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: int | None = None ):
'''simple docstring'''
_lowerCamelCase : Any = value
_lowerCamelCase : Optional[int] = random()
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> tuple[Node | None, Node | None]:
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCamelCase, _lowerCamelCase : int = split(root.left , _lowerCamelCase )
return left, root
else:
_lowerCamelCase, _lowerCamelCase : Optional[int] = split(root.right , _lowerCamelCase )
return root, right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCamelCase : Any = merge(left.right , _lowerCamelCase )
return left
else:
_lowerCamelCase : Optional[Any] = merge(_lowerCamelCase , right.left )
return right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase : int = Node(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Tuple = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , value - 1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCamelCase : Optional[Any] = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCamelCase : Optional[Any] = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : List[Any] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
_lowerCamelCase : int = input()
while args != "q":
_lowerCamelCase : List[str] = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
_lowerCamelCase : Tuple = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 46 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
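# A minimal usage sketch (assumed checkpoint name) for the lazily exported classes above:
#
#     from transformers import ViTMSNForImageClassification
#
#     model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")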
| 84 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Entropy of a categorical distribution along its last axis (optionally squaring first)."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor, one layer per line."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
__a , __a : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
__a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
__a : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
if head_mask is None:
__a : Union[str, Any] = torch.ones(lowerCamelCase_ , lowerCamelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCamelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__a : Any = None
__a : Optional[int] = 0.0
__a : Optional[Any] = 0.0
for step, inputs in enumerate(tqdm(lowerCamelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__a : Dict = tuple(t.to(args.device ) for t in inputs )
((__a) , ) : Dict = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__a : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , head_mask=lowerCamelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__a , __a , __a : int = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCamelCase_ ):
__a : List[str] = entropy(attn.detach() , lowerCamelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCamelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__a : Optional[Any] = 2
__a : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase_ , lowerCamelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__a : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(lowerCamelCase_ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(lowerCamelCase_ )
logger.info('Head ranked by importance scores' )
__a : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__a : str = torch.arange(
head_importance.numel() , device=args.device )
__a : Tuple = head_ranks.view_as(lowerCamelCase_ )
print_ad_tensor(lowerCamelCase_ )
return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
__a , __a , __a : Optional[int] = compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ )
__a : Tuple = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase_ , original_score * args.masking_threshold )
__a : Tuple = torch.ones_like(lowerCamelCase_ )
__a : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__a : Tuple = original_score
while current_score >= original_score * args.masking_threshold:
__a : Optional[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__a : List[str] = float('Inf' )
__a : List[Any] = head_importance.view(-1 ).sort()[1]
if len(lowerCamelCase_ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__a : Any = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__a : int = new_head_mask.view(-1 )
__a : Tuple = 0.0
__a : int = new_head_mask.view_as(lowerCamelCase_ )
__a : Optional[int] = new_head_mask.clone().detach()
print_ad_tensor(lowerCamelCase_ )
# Compute metric and head importance again
__a , __a , __a : int = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , head_mask=lowerCamelCase_ )
__a : List[Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(lowerCamelCase_ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
__a : List[Any] = datetime.now()
__a , __a , __a : List[str] = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ )
__a : List[str] = 1 / loss
__a : List[Any] = datetime.now() - before_time
__a : List[str] = sum(p.numel() for p in model.parameters() )
__a : Dict = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__a : Tuple = [
v,
]
assert sum(len(lowerCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCamelCase_ )
__a : Optional[Any] = sum(p.numel() for p in model.parameters() )
__a : Tuple = datetime.now()
__a , __a , __a : Tuple = compute_heads_importance(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ , actually_pruned=lowerCamelCase_ , )
__a : Optional[Any] = 1 / loss
__a : List[Any] = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCamelCase_ , lowerCamelCase_ , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase_ , lowerCamelCase_ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
save_model(lowerCamelCase_ , args.output_dir )
def main():
__a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=lowerCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=lowerCamelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=lowerCamelCase_ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=lowerCamelCase_ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase_ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=lowerCamelCase_ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase_ , help='Batch size.' )
parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 )
parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' )
__a : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__a : List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__a : Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__a : Union[str, Any] = torch.device('cuda' , args.local_rank )
__a : Any = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__a : List[Any] = nn.parallel.DistributedDataParallel(
lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ )
elif args.n_gpu > 1:
__a : Union[str, Any] = nn.DataParallel(lowerCamelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ )
torch.save(lowerCamelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , lowerCamelCase_ )
# Prepare dataset
__a : Tuple = np.concatenate(
[
            np.loadtxt(args.data_dir, dtype=np.int64),
] )
__a : str = (torch.from_numpy(lowerCamelCase_ ),)
__a : List[str] = TensorDataset(*lowerCamelCase_ )
__a : Optional[Any] = RandomSampler(lowerCamelCase_ )
__a : Union[str, Any] = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__a : Union[str, Any] = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
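# A tiny sketch (toy distributions) of the attention-entropy helper above: a uniform
# two-way distribution has entropy ln 2, a one-hot distribution has entropy 0.
def _entropy_demo():
    uniform = torch.tensor([[0.5, 0.5]])
    one_hot = torch.tensor([[1.0, 0.0]])
    print(entropy(uniform), entropy(one_hot))  # ≈ ln 2 (0.6931) and 0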
if __name__ == "__main__":
main()
| 47 |
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return all primes below `n`."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """For each pair of consecutive primes (p, q) with p*p <= limit, sum the numbers
    strictly between p*p and q*q (and <= limit) divisible by exactly one of p and q."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
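# A quick sanity-check sketch for the sieve above (small bound, verified by hand):
def _demo() -> None:
    print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]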
if __name__ == "__main__":
print(solution())
| 84 | 0 |
"""Viterbi algorithm: most probable sequence of hidden states for a sequence of observations."""
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely state path for the given observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )

            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
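# A toy sketch exercising viterbi() above with the classic Healthy/Fever HMM
# (assumed probabilities, taken from the well-known textbook example):
def _demo() -> None:
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # ['Healthy', 'Healthy', 'Fever']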
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING`
    objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
def find_duplicates(__SCREAMING_SNAKE_CASE ):
return [k for k, v in collections.Counter(__SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase = []
for key in import_dict_objects.keys():
lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase = 'base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
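# --- Self-contained sketch of the duplicate check performed above
# (`collections` is assumed to be imported at the top of the real file). ---
import collections

def _find_duplicates_demo(objs):
    # keep every object that appears more than once, in first-seen order
    return [k for k, v in collections.Counter(objs).items() if v > 1]

assert _find_duplicates_demo(["BertModel", "BertConfig", "BertModel"]) == ["BertModel"]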
def UpperCAmelCase_ ( ):
lowercase = []
for root, _, files in os.walk(__SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' )
lowercase = parse_init(__SCREAMING_SNAKE_CASE )
if objects is not None:
lowercase = analyze_results(*__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(__SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase_ ( ):
lowercase = []
for path, directories, files in os.walk(__SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(__SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
lowercase = str((Path(__SCREAMING_SNAKE_CASE ) / folder).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(__SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
lowercase = str((Path(__SCREAMING_SNAKE_CASE ) / fname).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(__SCREAMING_SNAKE_CASE )
return submodules
UpperCAmelCase = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def UpperCAmelCase_ ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
lowercase = direct_transformers_import(__SCREAMING_SNAKE_CASE )
lowercase = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' ) as f:
lowercase = f.read()
import_structure_keys.update(set(re.findall(r'import_structure\[\"([^\"]*)\"\]' , __SCREAMING_SNAKE_CASE ) ) )
lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase = '\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 84 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : Dict = 'Hello, World!'
_lowercase : Optional[int] = 'en_XX'
def lowercase__ ( snake_case_ :str , snake_case_ :str , snake_case_ :bool ):
__UpperCAmelCase = Path('''data_bin''' )
__UpperCAmelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case_ ).parent ) , checkpoint_file=Path(snake_case_ ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(snake_case_ ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(snake_case_ ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(snake_case_ )
__UpperCAmelCase = xmod.model.encoder.sentence_encoder
__UpperCAmelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , snake_case_ )
__UpperCAmelCase = XmodForSequenceClassification(snake_case_ ) if classification_head else XmodForMaskedLM(snake_case_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__UpperCAmelCase = xmod_sent_encoder.embed_tokens.weight
__UpperCAmelCase = xmod_sent_encoder.embed_positions.weight
__UpperCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__UpperCAmelCase = xmod_sent_encoder.layernorm_embedding.weight
__UpperCAmelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCAmelCase = model.roberta.encoder.layer[i]
__UpperCAmelCase = xmod_sent_encoder.layers[i]
# self attention
__UpperCAmelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
__UpperCAmelCase = xmod_layer.self_attn.q_proj.weight
__UpperCAmelCase = xmod_layer.self_attn.q_proj.bias
__UpperCAmelCase = xmod_layer.self_attn.k_proj.weight
__UpperCAmelCase = xmod_layer.self_attn.k_proj.bias
__UpperCAmelCase = xmod_layer.self_attn.v_proj.weight
__UpperCAmelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__UpperCAmelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
__UpperCAmelCase = xmod_layer.self_attn.out_proj.weight
__UpperCAmelCase = xmod_layer.self_attn.out_proj.bias
__UpperCAmelCase = xmod_layer.self_attn_layer_norm.weight
__UpperCAmelCase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__UpperCAmelCase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
__UpperCAmelCase = xmod_layer.fca.weight
__UpperCAmelCase = xmod_layer.fca.bias
# output
__UpperCAmelCase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
__UpperCAmelCase = xmod_layer.fca.weight
__UpperCAmelCase = xmod_layer.fca.bias
__UpperCAmelCase = xmod_layer.final_layer_norm.weight
__UpperCAmelCase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__UpperCAmelCase = xmod_layer.adapter_layer_norm.weight
__UpperCAmelCase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__UpperCAmelCase = bert_output.adapter_modules[lang_code]
__UpperCAmelCase = xmod_layer.adapter_modules[lang_code]
__UpperCAmelCase = from_adapter.fca.weight
__UpperCAmelCase = from_adapter.fca.bias
__UpperCAmelCase = from_adapter.fca.weight
__UpperCAmelCase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__UpperCAmelCase = xmod_sent_encoder.layer_norm.weight
__UpperCAmelCase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__UpperCAmelCase = xmod.model.classification_heads['''mnli'''].dense.weight
__UpperCAmelCase = xmod.model.classification_heads['''mnli'''].dense.bias
__UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight
__UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
__UpperCAmelCase = xmod.model.encoder.lm_head.dense.weight
__UpperCAmelCase = xmod.model.encoder.lm_head.dense.bias
__UpperCAmelCase = xmod.model.encoder.lm_head.layer_norm.weight
__UpperCAmelCase = xmod.model.encoder.lm_head.layer_norm.bias
__UpperCAmelCase = xmod.model.encoder.lm_head.weight
__UpperCAmelCase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__UpperCAmelCase = xmod.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case_ )
__UpperCAmelCase = model(snake_case_ )[0]
if classification_head:
__UpperCAmelCase = xmod.model.classification_heads['''mnli'''](xmod.extract_features(snake_case_ ) )
else:
__UpperCAmelCase = xmod.model(snake_case_ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__UpperCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_lowercase : Dict = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
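# Usage sketch for the converter above; the script name and all paths below
# are hypothetical placeholders:
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/model.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --classification_head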
| 49 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
UpperCAmelCase = TypeVar('''T''')
class A_ ( Generic[T] ):
'''simple docstring'''
def __init__( self , snake_case ):
lowercase = data
lowercase = None
def __str__( self ):
return F'''{self.data}'''
class A_ ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
lowercase = None
def __iter__( self ):
lowercase = self.top
while node:
yield node.data
lowercase = node.next
def __str__( self ):
return "->".join([str(snake_case ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def SCREAMING_SNAKE_CASE__ ( self ):
return self.top is None
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = Node(snake_case )
if not self.is_empty():
lowercase = self.top
lowercase = node
def SCREAMING_SNAKE_CASE__ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , snake_case )
lowercase = self.top
lowercase = self.top.next
return pop_node.data
def SCREAMING_SNAKE_CASE__ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
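# --- Self-contained sketch of the linked-list stack above (the original
# class names were mangled; `_DemoNode` / `_DemoStack` are assumed
# equivalents of the two classes). ---
class _DemoNode:
    def __init__(self, data):
        self.data = data
        self.next = None

class _DemoStack:
    def __init__(self):
        self.top = None

    def push(self, item):
        node = _DemoNode(item)
        node.next = self.top  # the new node points at the old top
        self.top = node

    def pop(self):
        if self.top is None:
            raise IndexError("pop from empty stack")
        node, self.top = self.top, self.top.next
        return node.data

_demo = _DemoStack()
_demo.push(1)
_demo.push(2)
assert _demo.pop() == 2 and _demo.pop() == 1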
| 84 | 0 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = (UnCLIPScheduler,)
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
lowerCamelCase__ = {
"""num_train_timesteps""": 10_00,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def UpperCamelCase_ ( self ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase ,prev_timestep=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowerCamelCase__ = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(variance_type="""learned_range""" )
lowerCamelCase__ = scheduler_class(**_lowerCAmelCase )
lowerCamelCase__ = 0.5
assert scheduler._get_variance(1 ,predicted_variance=_lowerCAmelCase ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(4_87 ,predicted_variance=_lowerCAmelCase ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(9_99 ,predicted_variance=_lowerCAmelCase ) - -0.001_0011 < 1E-5
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_lowerCAmelCase )
lowerCamelCase__ = scheduler.timesteps
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,generator=_lowerCAmelCase ).prev_sample
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
lowerCamelCase__ = scheduler.timesteps
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
lowerCamelCase__ = None
else:
lowerCamelCase__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ = scheduler.step(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,prev_timestep=_lowerCAmelCase ,generator=_lowerCAmelCase ).prev_sample
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ):
pass
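# --- Sketch of the "fixed_small_log" trick the first variance assertion
# above relies on (assumption: mirrors diffusers' UnCLIPScheduler, which
# clamps the variance at 1e-20 and returns its square root via log/exp). ---
import torch

def _fixed_small_log_demo(variance):
    log_var = torch.log(torch.clamp(variance, min=1e-20))
    return torch.exp(0.5 * log_var)  # i.e. the square root of the clamped variance

# A variance of ~0 is clamped to 1e-20, so the result is ~1e-10, which is
# why `_get_variance(0)` is compared against 1.0000E-10 above.
assert torch.allclose(_fixed_small_log_demo(torch.tensor(0.0)), torch.tensor(1e-10))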
| 50 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = LlamaModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = LlamaModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
lowercase = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = True
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
lowercase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention_mask
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCamelCase : List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : int = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : int = False
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LlamaModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'single_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'multi_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = ids_tensor([1, 10] , config.vocab_size )
lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase = LlamaModel(snake_case )
original_model.to(snake_case )
original_model.eval()
lowercase = original_model(snake_case ).last_hidden_state
lowercase = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase = {'type': scaling_type, 'factor': 10.0}
lowercase = LlamaModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
lowercase = scaled_model(snake_case ).last_hidden_state
lowercase = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
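# --- Sketch of the "linear" RoPE scaling variant exercised above
# (hypothetical helper, not the Transformers implementation): positions are
# divided by the scaling factor before the rotary angles are computed, which
# stretches the usable context window. ---
import torch

def _rope_angles_demo(seq_len, dim, base=10000.0, scaling_factor=1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float() / scaling_factor  # linear scaling
    return torch.outer(positions, inv_freq)  # (seq_len, dim // 2) angle table

# With factor 10, position 10 gets the angles position 1 had unscaled.
assert torch.allclose(
    _rope_angles_demo(11, 8, scaling_factor=10.0)[10], _rope_angles_demo(2, 8)[1]
)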
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowercase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
lowercase = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowercase = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowercase = 'Simply put, the theory of relativity states that '
lowercase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowercase = tokenizer.encode(snake_case , return_tensors='pt' )
lowercase = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=snake_case )
# greedy generation outputs
lowercase = model.generate(snake_case , max_new_tokens=64 , top_p=snake_case , temperature=1 , do_sample=snake_case )
lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case )
self.assertEqual(snake_case , snake_case )
| 84 | 0 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
a__ : int = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
a__ : Dict = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="maskformer"
_lowerCamelCase ={"hidden_size": "mask_feature_size"}
_lowerCamelCase =["resnet", "swin"]
_lowerCamelCase =["detr"]
def __init__( self : Tuple , a__ : int = 256 , a__ : int = 256 , a__ : float = 0.1 , a__ : bool = False , a__ : Optional[Dict] = None , a__ : Optional[Dict] = None , a__ : float = 0.02 , a__ : float = 1.0 , a__ : float = 1.0 , a__ : float = 1.0 , a__ : float = 20.0 , a__ : Optional[bool] = None , **a__ : List[Any] , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(a__ , a__ ):
UpperCAmelCase = backbone_config.pop('''model_type''' )
UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase = config_class.from_dict(a__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
f"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase = (
decoder_config.pop('''model_type''' ) if isinstance(a__ , a__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"Transformer Decoder {decoder_type} not supported, please use one of"
f" {','.join(self.decoders_supported )}" )
if isinstance(a__ , a__ ):
UpperCAmelCase = CONFIG_MAPPING[decoder_type]
UpperCAmelCase = config_class.from_dict(a__ )
UpperCAmelCase = backbone_config
UpperCAmelCase = decoder_config
# main feature dimension for the model
UpperCAmelCase = fpn_feature_size
UpperCAmelCase = mask_feature_size
# initializer
UpperCAmelCase = init_std
UpperCAmelCase = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase = cross_entropy_weight
UpperCAmelCase = dice_weight
UpperCAmelCase = mask_weight
UpperCAmelCase = use_auxiliary_loss
UpperCAmelCase = no_object_weight
UpperCAmelCase = output_auxiliary_logits
UpperCAmelCase = self.decoder_config.encoder_attention_heads
UpperCAmelCase = self.decoder_config.num_hidden_layers
super().__init__(**a__ )
@classmethod
def __snake_case ( cls : Tuple , a__ : PretrainedConfig , a__ : PretrainedConfig , **a__ : List[str] ):
return cls(
backbone_config=a__ , decoder_config=a__ , **a__ , )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = self.backbone_config.to_dict()
UpperCAmelCase = self.decoder_config.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
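# --- Usage sketch for the classmethod defined above (assuming the mangled
# name corresponds to `MaskFormerConfig.from_backbone_and_decoder_configs`). ---
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

demo_config = MaskFormerConfig.from_backbone_and_decoder_configs(
    backbone_config=SwinConfig(), decoder_config=DetrConfig()
)
assert demo_config.backbone_config.model_type == "swin"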
| 51 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
'''simple docstring'''
_UpperCamelCase : Dict = """dummy_data"""
_UpperCamelCase : Optional[int] = """datasets"""
_UpperCamelCase : Tuple = False
def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
lowercase = 0
lowercase = dataset_name
lowercase = cache_dir
lowercase = use_local_dummy_data
lowercase = config
# download_callbacks take a single url as input
lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase = str(snake_case )
# to be downloaded
lowercase = None
lowercase = None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._dummy_file is None:
lowercase = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self ):
        # return the full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case , snake_case ):
return self.create_dummy_data_dict(snake_case , snake_case )
elif isinstance(snake_case , (list, tuple) ):
return self.create_dummy_data_list(snake_case , snake_case )
else:
return self.create_dummy_data_single(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
return path
def SCREAMING_SNAKE_CASE__ ( self ):
return {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case , snake_case ):
for single_url in single_urls:
download_callback(snake_case )
else:
lowercase = single_urls
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case , snake_case ):
lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
else:
lowercase = single_urls
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
lowercase = value
# make sure that values are unique
if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase = [data_url[0]] * len(snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(snake_case )
return dummy_data_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
def _iter_archive_members(snake_case ):
# this preserves the order of the members inside the ZIP archive
lowercase = Path(self.dummy_file ).parent
lowercase = path.relative_to(snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case )
lowercase = Path(snake_case )
lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not isinstance(snake_case , snake_case ):
lowercase = [paths]
for path in paths:
if os.path.isfile(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(snake_case ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(snake_case , snake_case )
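# --- Self-contained sketch of the url -> dummy-file mapping used by the
# create_dummy_data_* methods above: the last path segment is kept and any
# query string is percent-encoded with quote_plus (the URL is hypothetical). ---
import os
import urllib.parse
from pathlib import Path

_demo_url = "https://example.com/data/train.csv?rev=2"
_demo_root = os.path.join("dummy", "1.0.0", "dummy_data")
_demo_path = os.path.join(_demo_root, urllib.parse.quote_plus(Path(_demo_url).name))
assert _demo_path.endswith("train.csv%3Frev%3D2")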
| 84 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : set[int] = vertices
__a : dict[EdgeT, int] = {
(min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items()
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__a : Dict = weight
def _lowerCamelCase ( self ):
__a : Graph = Graph({min(self.vertices )} , {} )
__a : EdgeT
__a : int
__a : EdgeT
__a : int
while len(subgraph.vertices ) < len(self.vertices ):
__a : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__a : List[str] = edge
__a : Optional[int] = weight
subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase )
return subgraph
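# --- Self-contained sketch of the Prim's step implemented above (assumed
# equivalent to the mangled `prims_algorithm`): repeatedly add the lightest
# edge that crosses the cut between tree and non-tree vertices. ---
def _prim_total_weight_demo(vertices, edges):
    in_tree = {min(vertices)}
    total = 0
    while len(in_tree) < len(vertices):
        # edges with exactly one endpoint inside the tree cross the cut
        (u, v), weight = min(
            ((e, w) for e, w in edges.items() if (e[0] in in_tree) ^ (e[1] in in_tree)),
            key=lambda item: item[1],
        )
        in_tree.update((u, v))
        total += weight
    return total

assert _prim_total_weight_demo({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 10}) == 8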
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
            if adjacency_matrix[edgea][edgea] != "-":
                __a : Tuple = int(adjacency_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
    print(F'{solution() = }')
| 52 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = OpenAIGPTTokenizer
_UpperCamelCase : List[Any] = OpenAIGPTTokenizerFast
_UpperCamelCase : int = True
_UpperCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowercase = dict(zip(snake_case , range(len(snake_case ) ) ) )
lowercase = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(snake_case ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase = 'lower'
lowercase = ['low', 'er</w>']
lowercase = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
lowercase = tokens + ['<unk>']
lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
# Simple input
lowercase = 'This is a simple input'
lowercase = ['This is a simple input 1', 'This is a simple input 2']
lowercase = ('This is a simple input', 'This is a pair')
lowercase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A_ ( __lowerCamelCase ):
'''simple docstring'''
pass
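# --- Illustrative sketch of the merges encoded in setUp above (simplified:
# the real tokenizer applies merges by rank with a priority over symbol
# pairs; here each merge is applied in a single left-to-right pass). ---
_demo_merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]

def _bpe_demo(word):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in _demo_merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols

# Matches the expected tokenization checked in the test above.
assert _bpe_demo("lower") == ["low", "er</w>"]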
| 84 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_snake_case : Optional[int] = logging.getLogger(__name__)
_snake_case : str = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_snake_case : Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCamelCase )} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
a_ = field(
        default=_UpperCamelCase , metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often, splitting large files into smaller ones can prevent the tokenizer from going out of memory."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
a_ = field(default=_UpperCamelCase , metadata={"""help""": """Whether ot not to use whole word mask."""} )
a_ = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
a_ = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
a_ = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
a_ = field(
        default=-1 , metadata={
            """help""": (
                """Optional input sequence length after tokenization. """
                """The training dataset will be truncated into blocks of this size for training. """
                """Defaults to the model max input length for single sentence inputs (taking special tokens into account)."""
)
} , )
a_ = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
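# --- Minimal sketch of the dataclass -> CLI pattern used by main() below
# (`_DemoArgs` and its field are hypothetical). ---
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser

@dataclass
class _DemoArgs:
    train_file: Optional[str] = field(default=None, metadata={"help": "Path to training data."})

(_demo_args,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(
    args=["--train_file", "data.txt"]
)
assert _demo_args.train_file == "data.txt"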
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ):
    def _dataset(file_path, ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )
    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file, args.train_ref_file )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.' )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len )
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        result = {'perplexity': perplexity}
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt' )
        if trainer.is_world_master():
            with open(output_eval_file, 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key in sorted(result.keys() ):
                    logger.info('  %s = %s', key, str(result[key] ) )
                    writer.write('%s = %s\n' % (key, str(result[key] )) )
        results.update(result )
    return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 53 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config ):
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports )
def pytest_sessionfinish(session, exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    '''simple docstring'''
    def check_output( self, want, got, optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
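# Usage note (illustrative, not part of the original conftest): with the
# patches above, an individual doctest can opt out of output comparison, e.g.
#
#   >>> print("some non-deterministic value")  # doctest: +IGNORE_RESULT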
| 84 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A ( Seq2SeqTrainer ):
    def __init__( self, *args, eval_examples=None, post_process_function=None, **kwargs ):
        '''simple docstring'''
        super().__init__(*args, **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self, eval_dataset: Optional[Dataset] = None, eval_examples=None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", **gen_kwargs ) -> Dict[str, float]:
        '''simple docstring'''
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'{metric_key_prefix}_' ):
                    metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics )
        return metrics
    def predict( self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs ):
        '''simple docstring'''
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'{metric_key_prefix}_' ):
                metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics )
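# Usage note (illustrative; `my_model`, `my_args`, `examples` and `post_fn`
# are placeholders): because evaluate()/predict() copy **gen_kwargs and fall
# back to args.generation_max_length / args.generation_num_beams, generation
# settings can be overridden per call, e.g.
#   trainer = A(model=my_model, args=my_args, eval_examples=examples, post_process_function=post_fn)
#   metrics = trainer.evaluate(max_length=128, num_beams=4)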
| 54 |
import torch
from torch import nn
class A_ ( nn.Module ):
'''simple docstring'''
    def __init__( self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed ) ) )
                else:
                    self.out_projs.append(None )
            self.out_layers.append(nn.Linear(d_embed, n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self, hidden, weight, bias, proj ):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid, weight, bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward( self, hidden, labels=None, keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1, hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1 )[mask].gather(1, labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj )
            head_logprob = nn.functional.log_softmax(head_logit, dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i )
                    hidden_i = hidden.index_select(0, indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self, hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] )
            return nn.functional.log_softmax(logit, dim=-1 )
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit, dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
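# Smoke test (illustrative, assumes the cleaned-up module above): with an
# empty cutoff list the criterion degenerates to a plain log-softmax over the
# full vocabulary, so the output has one row of n_token log-probs per input.
def _demo_adaptive_softmax_shape():
    crit = A_(n_token=1000, d_embed=32, d_proj=32, cutoffs=[], div_val=1 )
    hidden = torch.randn(4, 32 )
    return crit(hidden ).shape  # torch.Size([4, 1000])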
| 84 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers( remaining_length: int, remainder: int, digits: list[int], length: int ) -> int:
    """simple docstring"""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result
def solution( max_power: int = 9 ) -> int:
    """simple docstring"""
    result = 0
    for length in range(1, max_power + 1 ):
        result += reversible_numbers(length, 0, [0] * length, length )
    return result
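# A brute-force cross-check of the recursive counter above (hypothetical
# helper; far too slow for max_power=9, so only use small limits): a number n
# is "reversible" when n + reverse(n) consists entirely of odd digits.
def _brute_force_count(limit: int ) -> int:
    count = 0
    for n in range(1, limit ):
        if n % 10 == 0:  # the reversed value would have a leading zero
            continue
        if all(int(c ) % 2 == 1 for c in str(n + int(str(n )[::-1] ) ) ):
            count += 1
    return count  # _brute_force_count(1000) == 120 == solution(3)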
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55 |
from __future__ import annotations
class Matrix:
    '''simple docstring'''
    def __init__( self, rows ):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ):
        return len(self.rows )
    @property
    def num_columns( self ):
        return len(self.rows[0] )
    @property
    def order( self ):
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )
    def is_invertable( self ):
        return bool(self.determinant() )
    def get_minor( self, row, column ):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self, row, column ):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column )
        return -1 * self.get_minor(row, column )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row, column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)
    def __repr__( self ):
        return str(self.rows )
    def __str__( self ):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value ) for value in row] ) + '.]'
                    for row in self.rows
                ] )
            + "]"
        )
    def add_row( self, row, position = None ):
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row, list ):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self, column, position = None ):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column, list ):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self, other ):
        if not isinstance(other, Matrix ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self, other ):
        return not self == other
    def __neg__( self ):
        return self * -1
    def __add__( self, other ):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __sub__( self, other ):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __mul__( self, other ):
        if isinstance(other, (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other, Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row, column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )
    def __pow__( self, other ):
        if not isinstance(other, int ):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        result = self
        for _ in range(other - 1 ):
            result *= self
        return result
    @classmethod
    def dot_product( cls, row, column ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
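# Quick usage check (illustrative, assumes the cleaned-up class above):
# [[0, 1], [1, 0]] is its own inverse and squares to the identity.
_m = Matrix([[0, 1], [1, 0]] )
assert _m.determinant() == -1
assert _m.inverse().rows == [[0, 1], [1, 0]]
assert (_m**2).rows == [[1, 0], [0, 1]]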
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 | 0 |
'''simple docstring'''
def hamming( n_element: int ) -> list:
    """simple docstring"""
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
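# Sanity check (illustrative): the first ten Hamming numbers are the
# 5-smooth integers 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.
assert hamming(10 ) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]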
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f'''The list with nth numbers is: {hamming_numbers}''')
    print("-----------------------------------------------------")
| 56 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width( height, width, scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
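# Illustrative check: with the default factor 8, a 768x768 request maps to a
# 96x96 latent grid, and sizes that are not a multiple of 64 are rounded up.
assert downscale_height_and_width(768, 768 ) == (96, 96)
assert downscale_height_and_width(100, 100 ) == (16, 16)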
class A_ ( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self, shape, dtype, device, generator, latents, scheduler ):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self, gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device )
    def enable_model_cpu_offload( self, gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet, '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook' )
                and hasattr(module._hf_hook, 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self, image_embeds, negative_image_embeds, height = 512, width = 512, num_inference_steps = 100, guidance_scale = 4.0, num_images_per_prompt = 1, generator = None, latents = None, output_type = "pil", return_dict = True, ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list ):
            image_embeds = torch.cat(image_embeds, dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list ):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=device )
        self.scheduler.set_timesteps(num_inference_steps, device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1 )
            if not (
                hasattr(self.scheduler.config, 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1 )
            image = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 57 |
def decimal_isolate( number, digit_amount ):
    if digit_amount > 0:
        return round(number - int(number ), digit_amount )
    return number - int(number )
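# Behaviour note (descriptive): the helper keeps the sign of the input, e.g.
# decimal_isolate(-14.789, 3) == -0.789, and digit_amount == 0 returns the
# raw, unrounded fractional part.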
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 84 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream ( AbstractDatasetInputStream ):
    """simple docstring"""
    def __init__( self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )
    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="""train""" )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split="""train""", verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
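# Usage sketch (illustrative; this reader is what `Dataset.from_generator`
# wraps in the `datasets` library):
#
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#
#   ds = GeneratorDatasetInputStream(generator=gen ).read()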
| 58 |
from __future__ import annotations
def is_palindrome( num ):
    n = str(num )
    return n == n[::-1]
def solution( limit = 1000000 ):
    total = 0
    for i in range(1, limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
            total += i
    return total
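# Quick check (illustrative): below 20 the double-base palindromes are
# 1, 3, 5, 7 and 9 (e.g. 5 == 0b101), so the partial sum is 25; the full
# solution(10**6) is Project Euler 36's answer, 872187.
assert solution(20 ) == 25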
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 84 | 0 |
def get_data( source_data: list ) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score( data_lists: list[list[float]], weights: list[int] ) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores( score_lists: list[list[float]] ) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity( source_data: list[list[float]], weights: list[int] ) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists, weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
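# Worked example (illustrative): the first two columns use weight 0 (lower is
# better), the last column weight 1 (higher is better); each row gains its
# combined score as a new final element.
_scored = procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1] )
assert [row[-1] for row in _scored] == [2.0, 1.0, 1.3333333333333335]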
| 59 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}
class ConditionalDetrConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
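# Note (descriptive): `attribute_map` plus the two properties above let
# generic code read `hidden_size` / `num_attention_heads` even though this
# config stores those values as `d_model` / `encoder_attention_heads`.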
class ConditionalDetrOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1E-5
    @property
    def default_onnx_opset( self ):
        return 12
| 84 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy( out, labels ):
    """simple docstring"""
    outputs = np.argmax(out, axis=1 )
    return np.sum(outputs == labels )
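# Quick check (illustrative): argmax over axis 1 picks choice 1 then choice 0,
# both matching the labels, so the count of correct predictions is 2.
assert accuracy(np.array([[0.1, 0.9], [0.8, 0.2]] ), np.array([1, 0] ) ) == 2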
def load_rocstories_dataset( dataset_path ):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding='''utf_8''' ) as f:
        f_csv = csv.reader(f )
        output = []
        next(f_csv )  # skip the first line
        for line in tqdm(f_csv ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets( encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token ):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64 )
        mc_labels = np.zeros((n_batch,), dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
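# Shape note (descriptive): for every example the two candidate sequences are
# packed as [start] story [delim] continuation [clf], so input_ids is
# (n_batch, 2, input_len), mc_token_ids stores the position of each [clf]
# token, and mc_labels holds the index of the gold continuation.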
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, default='''openai-gpt''', help='''pretrained model name''' )
    parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''' )
    parser.add_argument('''--do_eval''', action='''store_true''', help='''Whether to run eval on the dev set.''' )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model predictions and checkpoints will be written.''', )
    parser.add_argument('''--train_dataset''', type=str, default='''''' )
    parser.add_argument('''--eval_dataset''', type=str, default='''''' )
    parser.add_argument('''--seed''', type=int, default=42 )
    parser.add_argument('''--num_train_epochs''', type=int, default=3 )
    parser.add_argument('''--train_batch_size''', type=int, default=8 )
    parser.add_argument('''--eval_batch_size''', type=int, default=16 )
    parser.add_argument('''--adam_epsilon''', default=1E-8, type=float, help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''', type=int, default=1 )
    parser.add_argument(
        '''--max_steps''', default=-1, type=int, help=(
            '''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
        ), )
    parser.add_argument(
        '''--gradient_accumulation_steps''', type=int, default=1, help='''Number of updates steps to accumulate before performing a backward/update pass.''', )
    parser.add_argument('''--learning_rate''', type=float, default=6.25E-5 )
    parser.add_argument('''--warmup_steps''', default=0, type=int, help='''Linear warmup over warmup_steps.''' )
    parser.add_argument('''--lr_schedule''', type=str, default='''warmup_linear''' )
    parser.add_argument('''--weight_decay''', type=float, default=0.01 )
    parser.add_argument('''--lm_coef''', type=float, default=0.9 )
    parser.add_argument('''--n_valid''', type=int, default=374 )
    parser.add_argument('''--server_ip''', type=str, default='''''', help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''', type=str, default='''''', help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    print(args )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True )
        ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    n_gpu = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(device, n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['''_start_''', '''_delimiter_''', '''_classify_''']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj, str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj, int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('''Encoding dataset...''' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ), len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length, model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids )
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size )
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
        optimizer_grouped_parameters = [
            {
                '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                '''weight_decay''': args.weight_decay,
            },
            {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total )
if args.do_train:
    nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
    model.train()
    for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
        tr_loss = 0
        nb_tr_steps = 0
        tqdm_bar = tqdm(train_dataloader , desc='''Training''' )
        for step, batch in enumerate(tqdm_bar ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            loss = args.lm_coef * losses[0] + losses[1]
            loss.backward()
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            tr_loss += loss.item()
            exp_average_loss = (
                loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
            )
            nb_tr_steps += 1
            tqdm_bar.desc = '''Training loss: {:.2e} lr: {:.2e}'''.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
    # Save a trained model, configuration and tokenizer
    model_to_save = model.module if hasattr(model , '''module''' ) else model  # Only save the model itself
    # If we save using the predefined names, we can load using `from_pretrained`
    output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
    output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
    torch.save(model_to_save.state_dict() , output_model_file )
    model_to_save.config.to_json_file(output_config_file )
    tokenizer.save_vocabulary(args.output_dir )
    # Load a trained model and vocabulary that you have fine-tuned
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
    model.to(device )
if args.do_eval:
    model.eval()
    eval_loss , eval_accuracy = 0, 0
    nb_eval_steps , nb_eval_examples = 0, 0
    for batch in tqdm(eval_dataloader , desc='''Evaluating''' ):
        batch = tuple(t.to(device ) for t in batch )
        input_ids , mc_token_ids , lm_labels , mc_labels = batch
        with torch.no_grad():
            _ , mc_loss , _ , mc_logits = model(
                input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
        mc_logits = mc_logits.detach().cpu().numpy()
        mc_labels = mc_labels.to('''cpu''' ).numpy()
        tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
        eval_loss += mc_loss.mean().item()
        eval_accuracy += tmp_eval_accuracy
        nb_eval_examples += input_ids.size(0 )
        nb_eval_steps += 1
    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    train_loss = tr_loss / nb_tr_steps if args.do_train else None
    result = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
    output_eval_file = os.path.join(args.output_dir , '''eval_results.txt''' )
    with open(output_eval_file , '''w''' ) as writer:
        logger.info('''***** Eval results *****''' )
        for key in sorted(result.keys() ):
            logger.info(''' %s = %s''' , key , str(result[key] ) )
            writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
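# Note: the eval loop above divides the summed output of `accuracy(...)` by the
# number of examples, so the helper (defined earlier in the full script, outside
# this excerpt) must return a count of correct predictions rather than a ratio.
# A minimal sketch consistent with that contract:
import numpy as np

def accuracy(out, labels):
    # count rows whose argmax matches the gold label
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)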
| 60 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
def all_chars_unique(input_str: str ):
    """Return True if every character in the input string is distinct."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
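# For contrast, an equivalent uniqueness check without bit manipulation. Worth
# noting: the bitmap above grows with the highest code point seen, while a set
# stays proportional to the number of distinct characters.
def all_chars_unique_set(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)

assert all_chars_unique_set("abcd")
assert not all_chars_unique_set("abca")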
| 61 |
def topological_sort(graph ):
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('Cycle exists' )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
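# `queue.pop(0)` above is O(n) per pop; for larger graphs a collections.deque
# gives O(1) pops. A minimal sketch of the same Kahn's-algorithm loop:
from collections import deque

def topological_sort_deque(graph):
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    topo = []
    while queue:
        vertex = queue.popleft()
        topo.append(vertex)
        for t in graph[vertex]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return topo if len(topo) == len(graph) else None  # None signals a cycle

print(topological_sort_deque(graph))  # [0, 1, 2, 3, 4, 5]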
| 84 | 0 |
class Graph:
    '''Adjacency-list graph with a recursive depth-first traversal.'''
    def __init__( self ):
        self.vertex = {}
    def print_graph( self ):
        print(self.vertex )
        for i in self.vertex:
            print(i , " -> " , " -> ".join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex , to_vertex ):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex , visited ):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=" " )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
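# A recursion-free variant, sketched with an explicit stack. The visit order can
# differ from the recursive version because neighbours are pushed and popped LIFO;
# pushing them reversed restores smallest-index-first exploration.
def dfs_iterative(adjacency, start):
    visited, order, stack = set(), [], [start]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        stack.extend(reversed(adjacency.get(vertex, [])))
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]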
| 62 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neo'''] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_gpt_neo'''] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
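# The _LazyModule these init files hand off to defers the heavy submodule
# imports until a symbol is first touched. A minimal sketch of the idea
# (illustrative only, not the actual transformers._LazyModule implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported names to their defining submodules on first access."""
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }
    def __getattr__(self, item):
        if item in self._symbol_to_module:
            submodule = importlib.import_module("." + self._symbol_to_module[item], self.__name__)
            return getattr(submodule, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")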
| 84 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a : Any = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] , *__lowercase : List[Any] , **__lowercase : List[Any] ) -> None:
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
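# A minimal sketch of the same deprecation-alias pattern, generic over any
# replacement class (class names here are illustrative, not transformers API):
import warnings

class NewProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept only for backwards compatibility."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor(scale=2.0)  # emits a FutureWarning, behaves like NewProcessor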
| 63 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : jnp.ndarray
_UpperCamelCase : jnp.ndarray
class A_ ( nn.Module ):
'''simple docstring'''
_UpperCamelCase : int
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
_UpperCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase = self.block_out_channels[i]
lowercase = self.block_out_channels[i + 1]
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = blocks
lowercase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case ):
lowercase = self.conv_in(snake_case )
lowercase = nn.silu(snake_case )
for block in self.blocks:
lowercase = block(snake_case )
lowercase = nn.silu(snake_case )
lowercase = self.conv_out(snake_case )
return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = 32
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1280
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.floataa
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : str = "rgb"
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase = jnp.ones((1,) , dtype=jnp.intaa )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase , lowercase = jax.random.split(snake_case )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.block_out_channels
lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase = self.num_attention_heads or self.attention_head_dim
# input
lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase = self.only_cross_attention
if isinstance(snake_case , snake_case ):
lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case , snake_case ):
lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase = []
lowercase = []
lowercase = block_out_channels[0]
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase = output_channel
lowercase = block_out_channels[i]
lowercase = i == len(snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase = FlaxCrossAttnDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase = FlaxDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case )
for _ in range(self.layers_per_block ):
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
if not is_final_block:
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
lowercase = down_blocks
lowercase = controlnet_down_blocks
# mid
lowercase = block_out_channels[-1]
lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase = jnp.flip(snake_case , axis=1 )
# 1. time
if not isinstance(snake_case , jnp.ndarray ):
lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase = timesteps.astype(dtype=jnp.floataa )
lowercase = jnp.expand_dims(snake_case , 0 )
lowercase = self.time_proj(snake_case )
lowercase = self.time_embedding(snake_case )
# 2. pre-process
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.conv_in(snake_case )
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.controlnet_cond_embedding(snake_case )
sample += controlnet_cond
# 3. down
lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case , snake_case ):
lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
else:
lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
        # 5. controlnet blocks
lowercase = ()
for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
lowercase = controlnet_block(snake_case )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase = controlnet_down_block_res_samples
lowercase = self.controlnet_mid_block(snake_case )
# 6. scaling
lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
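# A toy Flax module (illustrative only, not part of the diffusers API) showing
# why the model above transposes inputs to (0, 2, 3, 1): flax.linen.Conv
# expects NHWC-layout tensors.
import jax
import jax.numpy as jnp
import flax.linen as nn

class TinyCondEmbed(nn.Module):
    out_channels: int = 32

    @nn.compact
    def __call__(self, x):
        x = nn.Conv(16, kernel_size=(3, 3), padding=((1, 1), (1, 1)))(x)
        x = nn.silu(x)
        return nn.Conv(self.out_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)))(x)

params = TinyCondEmbed().init(jax.random.PRNGKey(0), jnp.zeros((1, 64, 64, 3)))  # NHWC input
out = TinyCondEmbed().apply(params, jnp.zeros((1, 64, 64, 3)))
print(out.shape)  # (1, 64, 64, 32)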
| 84 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A__ ( snake_case_ : Optional[Any] , snake_case_ : List[Any] ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
SCREAMING_SNAKE_CASE__: Optional[Any]= flax_key_tuple[:-1] + ('''weight''',)
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.permute(snake_case_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case_ ):
# linear layer
SCREAMING_SNAKE_CASE__: List[str]= flax_key_tuple[:-1] + ('''weight''',)
SCREAMING_SNAKE_CASE__: Optional[int]= flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE__: List[str]= flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def A__ ( snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Any ):
if "metadata" in layer:
SCREAMING_SNAKE_CASE__: str= layer.split('''metadata''' )
SCREAMING_SNAKE_CASE__: Optional[int]= ''''''.join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE__: Optional[Any]= [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
SCREAMING_SNAKE_CASE__: int= layer.split('''kvstore''' )
SCREAMING_SNAKE_CASE__: List[Any]= ''''''.join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE__: Tuple= [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
SCREAMING_SNAKE_CASE__: str= layer.split('''/''' )
SCREAMING_SNAKE_CASE__: int= '''/'''.join(split_layer[:-1] )
SCREAMING_SNAKE_CASE__: Optional[Any]= (split_layer[-1],)
if "kvstore/path" in layer:
SCREAMING_SNAKE_CASE__: List[Any]= F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
SCREAMING_SNAKE_CASE__: List[Any]= '''file'''
else:
SCREAMING_SNAKE_CASE__: Any= checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A__ ( snake_case_ : Optional[Any] , snake_case_ : str ):
SCREAMING_SNAKE_CASE__: str= rename_keys(snake_case_ )
SCREAMING_SNAKE_CASE__: List[str]= {}
for k, v in current_block.items():
SCREAMING_SNAKE_CASE__: List[Any]= v
SCREAMING_SNAKE_CASE__: Tuple= new_current_block
torch.save(snake_case_ , snake_case_ )
def A__ ( snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : str = WEIGHTS_NAME ):
SCREAMING_SNAKE_CASE__: Dict= convert_file_size_to_int(snake_case_ )
SCREAMING_SNAKE_CASE__: Optional[Any]= []
SCREAMING_SNAKE_CASE__: List[str]= {}
SCREAMING_SNAKE_CASE__: Optional[int]= 0
SCREAMING_SNAKE_CASE__: Dict= 0
os.makedirs(snake_case_ , exist_ok=snake_case_ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
SCREAMING_SNAKE_CASE__: Optional[int]= serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
SCREAMING_SNAKE_CASE__: Optional[int]= flatten_dict(snake_case_ , sep='''/''' )
SCREAMING_SNAKE_CASE__: List[Any]= {}
for layer in checkpoint_info.keys():
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= get_key_and_tensorstore_dict(
snake_case_ , snake_case_ , snake_case_ )
if curr_real_layer_name in all_layers:
SCREAMING_SNAKE_CASE__: Tuple= content
else:
SCREAMING_SNAKE_CASE__: List[Any]= {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
SCREAMING_SNAKE_CASE__: Union[str, Any]= ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
SCREAMING_SNAKE_CASE__: Dict= torch.tensor(snake_case_ )
SCREAMING_SNAKE_CASE__: Dict= raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= rename_base_flax_keys(tuple(key.split('''/''' ) ) , snake_case_ )
SCREAMING_SNAKE_CASE__: str= '''/'''.join(snake_case_ )
        # If this weight is going to tip over the maximal shard size, we split.
if current_block_size + weight_size > max_shard_size:
SCREAMING_SNAKE_CASE__: Optional[Any]= os.path.join(
snake_case_ , weights_name.replace('''.bin''' , F'-{len(snake_case_ )+1:05d}-of-???.bin' ) )
rename_and_save_block(snake_case_ , snake_case_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
SCREAMING_SNAKE_CASE__: Any= {}
SCREAMING_SNAKE_CASE__: Any= 0
SCREAMING_SNAKE_CASE__: List[str]= raw_weights.to(getattr(snake_case_ , snake_case_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
SCREAMING_SNAKE_CASE__: Any= os.path.join(snake_case_ , weights_name.replace('''.bin''' , F'-{len(snake_case_ )+1:05d}-of-???.bin' ) )
rename_and_save_block(snake_case_ , snake_case_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(snake_case_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE__: Dict= {}
SCREAMING_SNAKE_CASE__: List[str]= {}
for idx, shard in enumerate(snake_case_ ):
SCREAMING_SNAKE_CASE__: int= weights_name.replace(
            '''.bin''' , F'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
SCREAMING_SNAKE_CASE__: Tuple= os.path.join(snake_case_ , weights_name.replace('''.bin''' , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(snake_case_ , os.path.join(snake_case_ , snake_case_ ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= shard
for key in shard:
SCREAMING_SNAKE_CASE__: Union[str, Any]= shard_file
# Add the metadata
SCREAMING_SNAKE_CASE__: str= {'''total_size''': total_size}
SCREAMING_SNAKE_CASE__: str= {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(snake_case_ , snake_case_ ) , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE__: List[Any]= json.dumps(snake_case_ , indent=2 , sort_keys=snake_case_ ) + '''\n'''
f.write(snake_case_ )
return metadata, index
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
lowercase_ : str = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A__ ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
SCREAMING_SNAKE_CASE__: str= SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
SCREAMING_SNAKE_CASE__: Tuple= SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
SCREAMING_SNAKE_CASE__: Any= TaTokenizer.from_pretrained('''t5-small''' )
SCREAMING_SNAKE_CASE__: Any= '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
SCREAMING_SNAKE_CASE__: Optional[int]= tokenizer(snake_case_ , return_tensors='''pt''' ).input_ids
SCREAMING_SNAKE_CASE__: List[str]= model.generate(snake_case_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
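# A minimal sketch of the size-based sharding idea used above (the real
# shard_checkpoint in transformers also builds a weight-map index file; the
# names and threshold here are illustrative):
import torch

def shard_by_size(state_dict, max_bytes):
    shards, current, current_bytes = [], {}, 0
    for name, tensor in state_dict.items():
        size = tensor.numel() * tensor.element_size()
        # start a new shard once adding this tensor would exceed the budget
        if current and current_bytes + size > max_bytes:
            shards.append(current)
            current, current_bytes = {}, 0
        current[name] = tensor
        current_bytes += size
    if current:
        shards.append(current)
    return shards

sd = {'a.weight': torch.zeros(1000), 'b.weight': torch.zeros(1000), 'c.weight': torch.zeros(10)}
print([list(s) for s in shard_by_size(sd, max_bytes=4096)])  # [['a.weight'], ['b.weight', 'c.weight']]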
| 64 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase = '''true'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=16 ):
set_seed(42 )
lowercase = RegressionModel()
lowercase = deepcopy(__SCREAMING_SNAKE_CASE )
lowercase = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
lowercase = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
model.to(accelerator.device )
lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return model, ddp_model, dataloader
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowercase = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
lowercase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
with accelerator.main_process_first():
lowercase = dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowercase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__SCREAMING_SNAKE_CASE ):
if use_longest:
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=16 )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = Accelerator(dispatch_batches=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE )
lowercase = get_dataloader(__SCREAMING_SNAKE_CASE , not dispatch_batches )
lowercase = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=__SCREAMING_SNAKE_CASE )
lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = []
for batch in dataloader:
lowercase , lowercase = batch.values()
with torch.no_grad():
lowercase = model(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowercase , lowercase = [], []
for logit, targ in logits_and_targets:
logits.append(__SCREAMING_SNAKE_CASE )
targs.append(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = torch.cat(__SCREAMING_SNAKE_CASE ), torch.cat(__SCREAMING_SNAKE_CASE )
return logits, targs
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=16 ):
lowercase , lowercase , lowercase = get_basic_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase , lowercase = generate_predictions(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert (
len(__SCREAMING_SNAKE_CASE ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__SCREAMING_SNAKE_CASE )}'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False ):
lowercase = evaluate.load('glue' , 'mrpc' )
lowercase , lowercase = get_mrpc_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# First do baseline
lowercase , lowercase , lowercase = setup['no']
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
for batch in dataloader:
batch.to(__SCREAMING_SNAKE_CASE )
with torch.inference_mode():
lowercase = model(**__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=batch['labels'] )
lowercase = metric.compute()
# Then do distributed
lowercase , lowercase , lowercase = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowercase = model(**__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits.argmax(dim=-1 )
lowercase = batch['labels']
lowercase , lowercase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
lowercase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def UpperCAmelCase_ ( ):
lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__SCREAMING_SNAKE_CASE , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowercase = Accelerator()
test_torch_metrics(__SCREAMING_SNAKE_CASE , 512 )
accelerator.state._reset_state()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
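# A single-process illustration (not the accelerate internals) of the
# truncation gather_for_metrics performs: with 2 processes and 5 samples, the
# distributed sampler pads to 6 (3 per rank), so the gathered predictions
# contain one duplicated sample that must be dropped before computing metrics.
import torch

world_size, num_samples = 2, 5
per_rank = -(-num_samples // world_size)  # ceil division -> 3
padded = list(range(num_samples)) + list(range(per_rank * world_size - num_samples))
gathered = torch.tensor(padded)  # tensor([0, 1, 2, 3, 4, 0]) -- last entry is a repeat
print(gathered[:num_samples])  # tensor([0, 1, 2, 3, 4])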
| 84 | 0 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__UpperCAmelCase = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {}
state_dict.pop("""pixel_mean""" , __UpperCamelCase )
state_dict.pop("""pixel_std""" , __UpperCamelCase )
UpperCAmelCase__ : int = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase__ : List[str] = key.replace(__UpperCamelCase , __UpperCamelCase )
if re.match(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Tuple = int(re.match(__UpperCamelCase , __UpperCamelCase ).group(2 ) )
if layer_nb == 0:
UpperCAmelCase__ : Optional[Any] = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
UpperCAmelCase__ : Any = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
UpperCAmelCase__ : Any = key.replace("""layers.2""" , """proj_out""" )
UpperCAmelCase__ : Union[str, Any] = value
UpperCAmelCase__ : Dict = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="ybelkada/segment-anything" ):
'''simple docstring'''
UpperCAmelCase__ : str = hf_hub_download(__UpperCamelCase , F"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
UpperCAmelCase__ : Any = SamConfig()
elif "sam_vit_l" in model_name:
UpperCAmelCase__ : Union[str, Any] = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
UpperCAmelCase__ : str = SamConfig(
vision_config=__UpperCamelCase , )
elif "sam_vit_h" in model_name:
UpperCAmelCase__ : List[Any] = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
UpperCAmelCase__ : Optional[int] = SamConfig(
vision_config=__UpperCamelCase , )
UpperCAmelCase__ : List[str] = torch.load(__UpperCamelCase , map_location="""cpu""" )
UpperCAmelCase__ : Tuple = replace_keys(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = SamImageProcessor()
UpperCAmelCase__ : Dict = SamProcessor(image_processor=__UpperCamelCase )
UpperCAmelCase__ : int = SamModel(__UpperCamelCase )
hf_model.load_state_dict(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = hf_model.to("""cuda""" )
UpperCAmelCase__ : Optional[Any] = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
UpperCAmelCase__ : Dict = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert("""RGB""" )
UpperCAmelCase__ : Tuple = [[[400, 650]]]
UpperCAmelCase__ : Tuple = [[1]]
UpperCAmelCase__ : int = processor(images=np.array(__UpperCamelCase ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = hf_model(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
UpperCAmelCase__ : Union[str, Any] = processor(
images=np.array(__UpperCamelCase ) , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = hf_model(**__UpperCamelCase )
UpperCAmelCase__ : str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
UpperCAmelCase__ : str = ((75, 275, 1725, 850),)
UpperCAmelCase__ : List[Any] = processor(images=np.array(__UpperCamelCase ) , input_boxes=__UpperCamelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = hf_model(**__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
UpperCAmelCase__ : Any = [[[400, 650], [800, 650]]]
UpperCAmelCase__ : int = [[1, 1]]
UpperCAmelCase__ : str = processor(
images=np.array(__UpperCamelCase ) , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
UpperCAmelCase__ : Tuple = hf_model(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
__UpperCAmelCase = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
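# A minimal sketch of the substring-based key remapping that replace_keys
# performs above (the mapping table is abbreviated to two entries):
MINI_KEYS_TO_MODIFY = {"image_encoder": "vision_encoder", "blocks": "layers"}

def rename(key):
    for old, new in MINI_KEYS_TO_MODIFY.items():
        if old in key:
            key = key.replace(old, new)
    return key

print(rename("image_encoder.blocks.0.attn.proj"))  # vision_encoder.layers.0.attn.proj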
| 65 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = ["""image_processor""", """tokenizer"""]
_UpperCamelCase : Any = """OwlViTImageProcessor"""
_UpperCamelCase : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , snake_case=None , snake_case=None , **snake_case ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case , snake_case )
def __call__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="max_length" , snake_case="np" , **snake_case ):
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(snake_case , snake_case ) or (isinstance(snake_case , snake_case ) and not isinstance(text[0] , snake_case )):
lowercase = [self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )]
elif isinstance(snake_case , snake_case ) and isinstance(text[0] , snake_case ):
lowercase = []
# Maximum number of queries across batch
lowercase = max([len(snake_case ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(snake_case ) != max_num_queries:
lowercase = t + [' '] * (max_num_queries - len(snake_case ))
lowercase = self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )
encodings.append(snake_case )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
lowercase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
lowercase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
lowercase = BatchEncoding()
lowercase = input_ids
lowercase = attention_mask
if query_images is not None:
lowercase = BatchEncoding()
lowercase = self.image_processor(
snake_case , return_tensors=snake_case , **snake_case ).pixel_values
lowercase = query_pixel_values
if images is not None:
lowercase = self.image_processor(snake_case , return_tensors=snake_case , **snake_case )
if text is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case ) , tensor_type=snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process_object_detection(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process_image_guided_detection(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.tokenizer.batch_decode(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.tokenizer.decode(*snake_case , **snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case , )
return self.image_processor
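# The batching trick at the heart of __call__ above: each image's list of text
# queries is padded with blank strings to the batch-wide maximum before
# tokenization, so every sample yields the same number of query encodings.
# A standalone sketch:
texts = [["a cat"], ["a dog", "a remote"]]
max_num_queries = max(len(t) for t in texts)
padded = [t + [" "] * (max_num_queries - len(t)) for t in texts]
print(padded)  # [['a cat', ' '], ['a dog', 'a remote']]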
| 84 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Dict:
_lowercase : Optional[Any] = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
_lowercase , _lowercase : Any = input_paths_and_base_extractors[compression_format]
if input_path is None:
_lowercase : List[str] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(SCREAMING_SNAKE_CASE )
assert base_extractor.is_extractable(SCREAMING_SNAKE_CASE )
_lowercase : Any = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowercase : Optional[int] = file_path.read_text(encoding='utf-8' )
else:
_lowercase : Any = output_path.read_text(encoding='utf-8' )
_lowercase : str = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
_lowercase : Tuple = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
_lowercase : Any = input_paths[compression_format]
if input_path is None:
_lowercase : List[str] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = Extractor.infer_extractor_format(SCREAMING_SNAKE_CASE )
assert extractor_format is not None
_lowercase : int = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowercase : List[Any] = file_path.read_text(encoding='utf-8' )
else:
_lowercase : List[str] = output_path.read_text(encoding='utf-8' )
_lowercase : str = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
import tarfile
_lowercase : List[Any] = tmp_path / 'data_dot_dot'
directory.mkdir()
_lowercase : Optional[Any] = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
import tarfile
_lowercase : List[Any] = tmp_path / 'data_sym_link'
directory.mkdir()
_lowercase : str = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=SCREAMING_SNAKE_CASE )
with tarfile.TarFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
_lowercase : Optional[int] = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
_lowercase : str = insecure_tar_files[insecure_tar_file]
_lowercase : List[Any] = tmp_path / 'extracted'
TarExtractor.extract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
    # We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
_lowercase : List[str] = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
_lowercase : List[str] = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(SCREAMING_SNAKE_CASE )
assert zipfile.is_zipfile(str(SCREAMING_SNAKE_CASE ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(SCREAMING_SNAKE_CASE ) # but we're right
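# A minimal sketch of the magic-number sniffing the test above relies on. The
# real ZipExtractor handles more cases (an empty archive starts with
# b"PK\x05\x06" instead), so this is illustrative only:
def looks_like_zip(path):
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"  # local-file-header signature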
| 66 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCAmelCase = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCAmelCase = {
'''facebook/blenderbot_small-90M''': 512,
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = BlenderbotSmallTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case=False , snake_case=True , **snake_case , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case , merges=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , ) , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , **snake_case , )
lowercase = add_prefix_space
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None ):
lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 84 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
snake_case = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
snake_case = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> int:
_lowercase = list(state_dict.keys() )
for name in state_dict_keys:
_lowercase = state_dict.pop(snake_case__ )
# emb -> embedding
if name.startswith('emb.' ):
_lowercase = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_lowercase = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_lowercase = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , snake_case__ )
# ffn -> feed_forward
_lowercase = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , snake_case__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
_lowercase = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
_lowercase = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance
if name.endswith('.time_mix_r' ):
_lowercase = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_lowercase = 'rwkv.' + name
_lowercase = weight
return state_dict
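# The re.sub calls above carry a capture group into the replacement string; a
# standalone sketch of that backreference pattern:
import re

print(re.sub(R"blocks\.(\d+)\.att", R"blocks.\1.attention", "blocks.12.att.time_mix_k"))
# -> blocks.12.attention.time_mix_k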
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any , snake_case__ :Any , snake_case__ :Optional[Any] , snake_case__ :Optional[int]=None , snake_case__ :List[Any]=None , snake_case__ :List[Any]=False , snake_case__ :Union[str, Any]=None ) -> Union[str, Any]:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_lowercase = 5_0277
_lowercase = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_lowercase = PreTrainedTokenizerFast(tokenizer_file=snake_case__ )
_lowercase = len(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
# 2. Build the config
_lowercase = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_lowercase = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_lowercase = RwkvConfig(
vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case__ )
# 3. Download model file then convert state_dict
_lowercase = hf_hub_download(snake_case__ , snake_case__ )
_lowercase = torch.load(snake_case__ , map_location='cpu' )
_lowercase = convert_state_dict(snake_case__ )
# 4. Split in shards and save
_lowercase , _lowercase = shard_checkpoint(snake_case__ )
for shard_file, shard in shards.items():
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if index is not None:
_lowercase = os.path.join(snake_case__ , snake_case__ )
# Save the index as well
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
_lowercase = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '\n'
f.write(snake_case__ )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.' )
_lowercase = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_lowercase = torch.load(os.path.join(snake_case__ , snake_case__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_lowercase = AutoModelForCausalLM.from_pretrained(snake_case__ )
model.push_to_hub(snake_case__ , max_shard_size='2GB' )
tokenizer.push_to_hub(snake_case__ )
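# Example invocation of the conversion entry point above; the script file name
# and all argument values below are placeholders, not values from this repo:
#   python convert_rwkv_checkpoint_to_hf.py --repo_id <hub-repo-id> \
#       --checkpoint_file <checkpoint>.pth --output_dir <local-dir> --size <size>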
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
snake_case = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
) | 67 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
lowercase = model(snake_case , token_type_ids=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTDoubleHeadsModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = self.num_labels
lowercase = OpenAIGPTForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_UpperCamelCase : Tuple = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_UpperCamelCase : str = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case=False ):
lowercase = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=snake_case , )
lowercase = inputs_dict['labels']
lowercase = inputs_dict['labels']
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=snake_case , )
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = OpenAIGPTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(snake_case )
lowercase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=snake_case ) # the president is
lowercase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
            40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowercase = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].tolist() , snake_case )
| 84 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
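# Example launches of this script (the file name `tracking_example.py` is an
# assumption for whatever this file is saved as):
#   python tracking_example.py --with_tracking               # single CPU / single GPU
#   accelerate launch tracking_example.py --with_tracking    # distributed, after `accelerate config`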
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    """simple docstring"""
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split(""".""" )[0]
        accelerator.init_trackers(run , config )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    """accuracy""": eval_metric["""accuracy"""],
                    """f1""": eval_metric["""f1"""],
                    """train_loss""": total_loss.item() / len(train_dataloader ),
                    """epoch""": epoch,
                } , step=epoch , )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose """
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 """
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    parser.add_argument(
        """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
    parser.add_argument(
        """--project_dir""" , type=str , default="""logs""" , help="""Location where to store experiment tracking logs and relevant project information""" , )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 68 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"""do_clean_text""": False, """add_prefix_space""": False}
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.emoji_file , "w" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        """simple docstring"""
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def A ( self : Optional[Any] ):
"""simple docstring"""
pass # TODO add if relevant
def A ( self : int ):
"""simple docstring"""
pass # TODO add if relevant
def A ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
# Testing tokenization
__snake_case = "こんにちは、世界。 こんばんは、㔺界。"
__snake_case = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
__snake_case = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids without special tokens
__snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__snake_case = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids with special tokens
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__snake_case = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(a_ , a_ )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.get_tokenizer()
# Testing tokenization
__snake_case = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
__snake_case = "こんにちは、、、、世界。こんばんは、、、、世界。"
__snake_case = tokenizer.encode(a_ )
__snake_case = tokenizer.decode(a_ )
self.assertEqual(a_ , a_ )
@slow
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
__snake_case = "こんにちは、世界。"
__snake_case = "こんばんは、㔺界。😀"
__snake_case = "こんにちは、世界。こんばんは、世界。😀"
__snake_case = tokenizer.encode(prefix_text + input_text )
__snake_case = tokenizer.encode("" , prefix_text=prefix_text + input_text )
__snake_case = tokenizer.encode(a_ , prefix_text=a_ )
__snake_case = tokenizer.decode(a_ )
__snake_case = tokenizer.decode(a_ )
__snake_case = tokenizer.decode(a_ )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , a_ )
@slow
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
__snake_case = "こんにちは、世界。"
__snake_case = "こんばんは、㔺界。😀"
__snake_case = len(tokenizer.encode(a_ ) ) - 2
__snake_case = len(tokenizer.encode(a_ ) ) - 2
__snake_case = [1] + [0] * (len_prefix + len_text + 1)
__snake_case = [1] * (len_prefix + len_text + 1) + [0]
__snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__snake_case = tokenizer(prefix_text + input_text ).token_type_ids
__snake_case = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
__snake_case = tokenizer(a_ , prefix_text=a_ ).token_type_ids
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , a_ )
@slow
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
__snake_case = tokenizer.encode("あンいワ" )
__snake_case = tokenizer.encode("" , prefix_text="あンいワ" )
__snake_case = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(a_ ) , tokenizer.decode(a_ ) )
self.assertEqual(tokenizer.decode(a_ ) , tokenizer.decode(a_ ) )
self.assertNotEqual(a_ , a_ )
self.assertNotEqual(a_ , a_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A ( self : str ):
"""simple docstring"""
__snake_case = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
__snake_case = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
__snake_case = tokenizer(a_ , padding=a_ )
__snake_case = tokenizer.batch_encode_plus(a_ , padding=a_ )
# fmt: off
__snake_case = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
__snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , a_ )
self.assertListEqual(x_token.token_type_ids , a_ )
self.assertListEqual(x_token.attention_mask , a_ )
self.assertListEqual(x_token_a.input_ids , a_ )
self.assertListEqual(x_token_a.token_type_ids , a_ )
self.assertListEqual(x_token_a.attention_mask , a_ )
def A ( self : List[str] ):
"""simple docstring"""
pass
def A ( self : int ):
"""simple docstring"""
pass
| 69 |
import math
def prime_sieve( n ):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
return primes
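# The function below solves the "semidivisible numbers" problem (Project Euler
# 234): for last_prime**2 < n <= next_prime**2, lps(n) = last_prime and
# ups(n) = next_prime, so it adds the multiples of each of the two primes in
# that window and then subtracts the multiples of both twice (those were
# counted by both running sums but are not semidivisible).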
def solution( limit = 999_966_663_333 ):
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend( line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( init_file ):
    with open(init_file , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]' , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ):
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = 'base imports' if key == 'none' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
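# Illustrative behaviour of analyze_results (its nested find_duplicates helper
# returns the items seen more than once, e.g. ["foo", "bar", "foo"] -> ["foo"]):
#   analyze_results({'none': ['A']} , {'none': ['A', 'B']})
#   -> ['Differences for base imports:', '  B in TYPE_HINT but not in _import_structure.']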
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '__init__.py' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('\n'.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('\n\n'.join(failures ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('*.py' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , '.' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
    import_structure_keys = set(transformers._import_structure.keys() )
    # This contains all the base keys of the `_import_structure` object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS , '__init__.py' ) , 'r' ) as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r'import_structure\[\"([^\"]*)\"\]' , init_content ) ) )
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '\n'.join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'''{list_of_modules}\n'''
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class _snake_case :
    def __init__( self , keywords ):
        self.adlist : list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self , current_state , char ):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self , keyword ):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ):
        q : deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state , self.adlist[child]["value"] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in( self , string ):
        result : dict = {}  # maps each matched keyword to the list of its start indices
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
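    # Illustrative usage of the automaton above; the expected occurrence
    # indices were verified by hand for this input string:
    auto = _snake_case(["what", "hat", "ver", "er"] )
    assert auto.search_in("whatever, err ... , wherever" ) == {
        "what": [0],
        "hat": [1],
        "ver": [5, 25],
        "er": [6, 10, 22, 26],
    }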
| 71 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
UpperCAmelCase = TypeVar('''T''')
class A_ ( Generic[T] ):
'''simple docstring'''
    def __init__( self , data ):
        self.data = data
        self.next = None
def __str__( self ):
return F'''{self.data}'''
class A_ ( Generic[T] ):
'''simple docstring'''
    def __init__( self ):
        self.top = None
    def __iter__( self ):
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self ):
        return "->".join([str(item ) for item in self] )
    def __len__( self ):
        return len(tuple(iter(self ) ) )
    def is_empty( self ):
        return self.top is None
    def push( self , item ):
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ):
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self ):
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear( self ):
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
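    # Illustrative usage of the stack above (class name as defined in this file):
    stack = A_[int]()
    stack.push(5 )
    stack.push(9 )
    assert str(stack ) == "9->5"
    assert stack.pop() == 9
    assert stack.peek() == 5
    assert len(stack ) == 1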
| 84 | 0 |
'''simple docstring'''
ENERGY_CONVERSION : dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion( from_type : str , to_type : str , value : float ) -> float:
    '''simple docstring'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
            f'Valid values are: {", ".join(ENERGY_CONVERSION )}'
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
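# Illustrative conversions using the table above (the expected values follow
# directly from the joule factors):
#   energy_conversion("joule", "kilojoule", 1_000 )   -> 1.0
#   energy_conversion("kilowatthour", "joule", 1.0 )  -> 3_600_000.0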
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = LlamaModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = LlamaModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
lowercase = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = True
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCamelCase : List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : int = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : int = False
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LlamaModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'single_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'multi_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = ids_tensor([1, 10] , config.vocab_size )
lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase = LlamaModel(snake_case )
original_model.to(snake_case )
original_model.eval()
lowercase = original_model(snake_case ).last_hidden_state
lowercase = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase = {'type': scaling_type, 'factor': 10.0}
lowercase = LlamaModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
lowercase = scaled_model(snake_case ).last_hidden_state
lowercase = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowercase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
lowercase = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowercase = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowercase = 'Simply put, the theory of relativity states that '
lowercase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowercase = tokenizer.encode(snake_case , return_tensors='pt' )
lowercase = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=snake_case )
# greedy generation outputs
lowercase = model.generate(snake_case , max_new_tokens=64 , top_p=snake_case , temperature=1 , do_sample=snake_case )
lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case )
self.assertEqual(snake_case , snake_case )
| 84 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        cursor_info = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()
@contextmanager
def hide():
try:
hide_cursor()
yield
finally:
show_cursor()
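# Minimal usage sketch for the helpers above; the context manager restores the
# cursor even if the wrapped block raises (`run_long_task` is hypothetical):
#
#     with hide():
#         run_long_task()  # terminal cursor is hidden while this runs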
| 73 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
'''simple docstring'''
_UpperCamelCase : Dict = """dummy_data"""
_UpperCamelCase : Optional[int] = """datasets"""
_UpperCamelCase : Tuple = False
def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
lowercase = 0
lowercase = dataset_name
lowercase = cache_dir
lowercase = use_local_dummy_data
lowercase = config
# download_callbacks take a single url as input
lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase = str(snake_case )
# to be downloaded
lowercase = None
lowercase = None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._dummy_file is None:
lowercase = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case , snake_case ):
return self.create_dummy_data_dict(snake_case , snake_case )
elif isinstance(snake_case , (list, tuple) ):
return self.create_dummy_data_list(snake_case , snake_case )
else:
return self.create_dummy_data_single(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
return path
def SCREAMING_SNAKE_CASE__ ( self ):
return {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case , snake_case ):
for single_url in single_urls:
download_callback(snake_case )
else:
lowercase = single_urls
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case , snake_case ):
lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
else:
lowercase = single_urls
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
lowercase = value
# make sure that values are unique
if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase = [data_url[0]] * len(snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(snake_case )
return dummy_data_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
def _iter_archive_members(snake_case ):
# this preserves the order of the members inside the ZIP archive
lowercase = Path(self.dummy_file ).parent
lowercase = path.relative_to(snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case )
lowercase = Path(snake_case )
lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not isinstance(snake_case , snake_case ):
lowercase = [paths]
for path in paths:
if os.path.isfile(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(snake_case ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(snake_case , snake_case )
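# Sketch of the naming rule the manager implements: each URL is mapped to its
# URL-quoted last path component under the dummy folder (URL is illustrative):
#
#     Path("https://host/data/train.csv?rev=2").name   # "train.csv?rev=2"
#     urllib.parse.quote_plus("train.csv?rev=2")       # "train.csv%3Frev%3D2"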
| 84 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( SequenceFeatureExtractor ):
"""simple docstring"""
lowerCAmelCase_ = ['''input_values''', '''attention_mask''']
def __init__( self : Tuple , _A : int = 1 , _A : int = 1_6000 , _A : float = 0.0 , _A : bool = False , _A : int = 80 , _A : int = 16 , _A : int = 64 , _A : str = "hann_window" , _A : float = 1.0 , _A : float = 80 , _A : float = 7600 , _A : float = 1e-10 , _A : int = 2 , _A : bool = True , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = do_normalize
__SCREAMING_SNAKE_CASE : Optional[Any] = return_attention_mask
__SCREAMING_SNAKE_CASE : Optional[Any] = num_mel_bins
__SCREAMING_SNAKE_CASE : Dict = hop_length
__SCREAMING_SNAKE_CASE : Any = win_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = win_function
__SCREAMING_SNAKE_CASE : str = frame_signal_scale
__SCREAMING_SNAKE_CASE : Tuple = fmin
__SCREAMING_SNAKE_CASE : Any = fmax
__SCREAMING_SNAKE_CASE : Dict = mel_floor
__SCREAMING_SNAKE_CASE : Union[str, Any] = reduction_factor
__SCREAMING_SNAKE_CASE : List[str] = win_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : List[Any] = hop_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : Union[str, Any] = optimal_fft_length(self.sample_size )
__SCREAMING_SNAKE_CASE : str = (self.n_fft // 2) + 1
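        # With the defaults above (win_length = 64 ms at 16 kHz): sample_size =
        # 64 * 16000 // 1000 = 1024, n_fft = optimal_fft_length(1024) = 1024,
        # and n_freqs = 1024 // 2 + 1 = 513 frequency bins.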
__SCREAMING_SNAKE_CASE : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
            __SCREAMING_SNAKE_CASE : Optional[int] = np.array(_A , np.int32 )
__SCREAMING_SNAKE_CASE : List[Any] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
__SCREAMING_SNAKE_CASE : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__SCREAMING_SNAKE_CASE : Any = padding_value
normed_input_values.append(_A )
else:
__SCREAMING_SNAKE_CASE : int = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase__ ( self : Any , _A : np.ndarray , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = spectrogram(
_A , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self : Dict , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , **_A : str , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
__SCREAMING_SNAKE_CASE : str = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if audio_target is not None:
__SCREAMING_SNAKE_CASE : List[Any] = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
if inputs is None:
return inputs_target
else:
__SCREAMING_SNAKE_CASE : str = inputs_target['''input_values''']
__SCREAMING_SNAKE_CASE : Dict = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
__SCREAMING_SNAKE_CASE : Tuple = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self : Tuple , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = False , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , **_A : str , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = isinstance(_A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            __SCREAMING_SNAKE_CASE : Tuple = [np.asarray(_A , dtype=np.float32 ) for speech in speech]
        elif not is_batched and not isinstance(_A , np.ndarray ):
            __SCREAMING_SNAKE_CASE : Any = np.asarray(_A , dtype=np.float32 )
        elif isinstance(_A , np.ndarray ) and speech.dtype is np.dtype(np.float32 ):
            __SCREAMING_SNAKE_CASE : Tuple = speech.astype(np.float32 )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_size
# convert into correct format for padding
if is_target:
__SCREAMING_SNAKE_CASE : Tuple = [self._extract_mel_features(_A ) for waveform in speech]
__SCREAMING_SNAKE_CASE : Tuple = BatchFeature({'''input_values''': features} )
__SCREAMING_SNAKE_CASE : Any = self.num_mel_bins
else:
__SCREAMING_SNAKE_CASE : Dict = BatchFeature({'''input_values''': speech} )
__SCREAMING_SNAKE_CASE : Dict = self.pad(
_A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , )
__SCREAMING_SNAKE_CASE : List[Any] = feature_size_hack
# convert input values to correct format
__SCREAMING_SNAKE_CASE : str = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
            __SCREAMING_SNAKE_CASE : Any = [np.asarray(_A , dtype=np.float32 ) for array in input_values]
elif (
not isinstance(_A , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float32 )
        ):
            __SCREAMING_SNAKE_CASE : List[Any] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(_A , np.ndarray ) and input_values.dtype is np.dtype(np.float32 ):
            __SCREAMING_SNAKE_CASE : Any = input_values.astype(np.float32 )
# convert attention_mask to correct format
__SCREAMING_SNAKE_CASE : List[str] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
            __SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(_A , dtype=np.int32 ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__SCREAMING_SNAKE_CASE : Optional[Any] = (
attention_mask
if self._get_padding_strategies(_A , max_length=_A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__SCREAMING_SNAKE_CASE : List[str] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=_A , padding_value=self.padding_value )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : str = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__SCREAMING_SNAKE_CASE : int = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
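# Illustrative call sketch: the class above mirrors SpeechT5FeatureExtractor,
# and the keyword names follow that real API; `waveform` (a 16 kHz mono float
# array) is an assumption:
#
#     extractor = SpeechT5FeatureExtractor()
#     inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#     targets = extractor(audio_target=waveform, sampling_rate=16000)  # log-mel features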
| 74 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = OpenAIGPTTokenizer
_UpperCamelCase : List[Any] = OpenAIGPTTokenizerFast
_UpperCamelCase : int = True
_UpperCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowercase = dict(zip(snake_case , range(len(snake_case ) ) ) )
lowercase = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(snake_case ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase = 'lower'
lowercase = ['low', 'er</w>']
lowercase = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
lowercase = tokens + ['<unk>']
lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
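    # Why "lower" -> ["low", "er</w>"] given the vocab/merges in setUp (sketch):
    # BPE starts from characters plus an end-of-word marker, "l o w e r</w>",
    # then applies merges in rank order: "l o" -> "lo", "lo w" -> "low",
    # "e r</w>" -> "er</w>", leaving ["low", "er</w>"] (vocab ids 14 and 15).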
def SCREAMING_SNAKE_CASE__ ( self , snake_case=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
# Simple input
lowercase = 'This is a simple input'
lowercase = ['This is a simple input 1', 'This is a simple input 2']
lowercase = ('This is a simple input', 'This is a pair')
lowercase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpaCy ( A_ ):
'''simple docstring'''
pass
| 84 | 0 |
'''simple docstring'''
from math import factorial
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if successes > trials:
        raise ValueError('''successes must be less than or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
    if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
        raise ValueError('''trials and successes must be integers''' )
if not 0 < prob < 1:
        raise ValueError('''prob has to be strictly between 0 and 1''' )
UpperCAmelCase__ : Any = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
UpperCAmelCase__ : Any = float(factorial(lowerCAmelCase__ ) )
coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
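# Worked check of the demo above: P(X = 2) with trials = 4 and prob = 0.75 is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.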
| 75 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
from transformers.testing_utils import pytest_terminal_summary_main
lowercase = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(__SCREAMING_SNAKE_CASE , id=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
lowercase = 0
# Doctest custom flag to ignore output.
UpperCAmelCase = doctest.register_optionflag('''IGNORE_RESULT''')
UpperCAmelCase = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , snake_case , snake_case , snake_case )
UpperCAmelCase = CustomOutputChecker
UpperCAmelCase = HfDoctestModule
UpperCAmelCase = HfDocTestParser
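# Usage sketch for the custom flag registered above: tag a doctest example with
# the flag so its printed value is not compared (`unstable_value` is hypothetical):
#
#     >>> unstable_value()  # doctest: +IGNORE_RESULT
#     0.123456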
| 84 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ = 1_6
a_ = 3_2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = "bert-base-cased" ):
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
__lowercase : Dict = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__lowercase : List[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase : str = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__lowercase : Union[str, Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
__lowercase : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
# Initialize accelerator
__lowercase : Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase : str = config['''lr''']
__lowercase : Optional[Any] = int(config['''num_epochs'''] )
__lowercase : Union[str, Any] = int(config['''seed'''] )
__lowercase : Optional[Any] = int(config['''batch_size'''] )
__lowercase : Union[str, Any] = args.model_name_or_path
set_seed(__UpperCamelCase )
__lowercase ,__lowercase : Optional[Any] = get_dataloaders(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase : Dict = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase )
# Instantiate optimizer
__lowercase : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowercase : Tuple = optimizer_cls(params=model.parameters() , lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
__lowercase : Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__lowercase : List[str] = 1
__lowercase : str = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
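    # e.g. 1,000 dataloader batches per epoch for 3 epochs with accumulation 4
    # gives (1000 * 3) // 4 = 750 scheduler steps, one per optimizer update.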
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowercase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=0 , num_training_steps=__UpperCamelCase , )
else:
__lowercase : Optional[Any] = DummyScheduler(__UpperCamelCase , total_num_steps=__UpperCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase : List[str] = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
__lowercase : List[str] = 0
    # We also need to keep track of the starting epoch so files are named properly
__lowercase : List[Any] = 0
# Now we train the model
__lowercase : Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
__lowercase : Optional[int] = 0
__lowercase : Union[str, Any] = {}
for epoch in range(__UpperCamelCase , __UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
__lowercase : int = model(**__UpperCamelCase )
__lowercase : List[str] = outputs.loss
__lowercase : int = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__lowercase : str = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowercase : int = model(**__UpperCamelCase )
__lowercase : List[str] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowercase ,__lowercase : Optional[Any] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCamelCase ) - 1:
__lowercase : int = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowercase : List[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
__lowercase : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __UpperCamelCase )
__lowercase : str = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
__lowercase : List[str] = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( ):
    __lowercase : Optional[int] = argparse.ArgumentParser(description='''Simple example of a DeepSpeed-ready training script with a performance lower bound check.''' )
parser.add_argument(
'''--model_name_or_path''' , type=__UpperCamelCase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__UpperCamelCase , )
parser.add_argument(
'''--output_dir''' , type=__UpperCamelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--performance_lower_bound''' , type=__UpperCamelCase , default=__UpperCamelCase , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=__UpperCamelCase , default=3 , help='''Number of train epochs.''' , )
__lowercase : str = parser.parse_args()
__lowercase : str = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
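# Launch sketch (the flags match the argparse definitions above; the DeepSpeed
# config file name and script name are assumptions):
#
#     accelerate launch --config_file ds_zero2.yaml this_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out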
| 76 |
import torch
from torch import nn
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case=1 , snake_case=False ):
super().__init__()
lowercase = n_token
lowercase = d_embed
lowercase = d_proj
lowercase = cutoffs + [n_token]
lowercase = [0] + self.cutoffs
lowercase = div_val
lowercase = self.cutoffs[0]
lowercase = len(self.cutoffs ) - 1
lowercase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowercase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
lowercase = nn.ModuleList()
lowercase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
else:
self.out_projs.append(snake_case )
self.out_layers.append(nn.Linear(snake_case , snake_case ) )
else:
for i in range(len(self.cutoffs ) ):
lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
self.out_layers.append(nn.Linear(snake_case , r_idx - l_idx ) )
lowercase = keep_order
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
if proj is None:
lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowercase = nn.functional.linear(snake_case , proj.t().contiguous() )
lowercase = nn.functional.linear(snake_case , snake_case , bias=snake_case )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None , snake_case=False ):
if labels is not None:
# Shift so that tokens < n predict n
lowercase = hidden[..., :-1, :].contiguous()
lowercase = labels[..., 1:].contiguous()
lowercase = hidden.view(-1 , hidden.size(-1 ) )
lowercase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowercase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowercase = labels != -100
lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
lowercase = (
-nn.functional.log_softmax(snake_case , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowercase = nn.functional.log_softmax(snake_case , dim=-1 )
else:
# construct weights and biases
lowercase , lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase = self.out_layers[0].weight[l_idx:r_idx]
lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase = self.out_layers[i].weight
lowercase = self.out_layers[i].bias
if i == 0:
lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(snake_case )
biases.append(snake_case )
lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
if labels is None:
lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowercase = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
lowercase = 0
lowercase = [0] + self.cutoffs
for i in range(len(snake_case ) - 1 ):
lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowercase = (labels >= l_idx) & (labels < r_idx)
lowercase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowercase = labels.index_select(0 , snake_case ) - l_idx
lowercase = head_logprob.index_select(0 , snake_case )
lowercase = hidden.index_select(0 , snake_case )
else:
lowercase = hidden
if i == 0:
if labels is not None:
lowercase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowercase = head_logprob[:, : self.cutoffs[0]]
else:
lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowercase = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , snake_case , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if self.n_clusters == 0:
lowercase = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(snake_case , dim=-1 )
else:
# construct weights and biases
lowercase , lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase , lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase = self.out_layers[0].weight[l_idx:r_idx]
lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase = self.out_layers[i].weight
lowercase = self.out_layers[i].bias
if i == 0:
lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(snake_case )
biases.append(snake_case )
lowercase , lowercase , lowercase = weights[0], biases[0], self.out_projs[0]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
lowercase = [0] + self.cutoffs
for i in range(len(snake_case ) - 1 ):
lowercase , lowercase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowercase = head_logprob[:, : self.cutoffs[0]]
else:
lowercase , lowercase , lowercase = weights[i], biases[i], self.out_projs[i]
lowercase = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
lowercase = nn.functional.log_softmax(snake_case , dim=1 )
lowercase = head_logprob[:, -i] + tail_logprob_i
lowercase = logprob_i
return out
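# Sketch of the adaptive-softmax factorization the class implements: for a token
# w that falls in tail cluster i, the head softmax covers the shortlist plus one
# "cluster" logit per tail, so
#
#     log p(w | h) = head_logprob[:, self.cutoffs[0] + i - 1] + tail_logprob_i[:, w]
#
# which is exactly what the cluster_prob_idx arithmetic above composes.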
| 84 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : list[list[str]] = [[] for _ in range(UpperCamelCase )]
__UpperCAmelCase : Union[str, Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(UpperCamelCase ) <= key:
return input_string
for position, character in enumerate(UpperCamelCase ):
__UpperCAmelCase : Dict = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase : List[str] = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = ["".join(UpperCamelCase ) for row in temp_grid]
__UpperCAmelCase : Any = "".join(UpperCamelCase )
return output_string
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Union[str, Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
__UpperCAmelCase : list[list[str]] = [[] for _ in range(UpperCamelCase )] # generates template
for position in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Optional[int] = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase : str = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
__UpperCAmelCase : Union[str, Any] = 0
for row in temp_grid: # fills in the characters
__UpperCAmelCase : Tuple = input_string[counter : counter + len(UpperCamelCase )]
grid.append(list(UpperCamelCase ) )
counter += len(UpperCamelCase )
__UpperCAmelCase : List[str] = "" # reads as zigzag
for position in range(len(UpperCamelCase ) ):
__UpperCAmelCase : Dict = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase : Union[str, Any] = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def _UpperCamelCase ( UpperCamelCase ) -> dict[int, str]:
"""simple docstring"""
__UpperCAmelCase : Tuple = {}
for key_guess in range(1 , len(UpperCamelCase ) ): # tries every key
__UpperCAmelCase : str = decrypt(UpperCamelCase , UpperCamelCase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
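# Worked example for the functions above with key = 3: "HELLO WORLD" zigzags
# over three rails (the underscore marks the space, which lands on the middle rail):
#
#     H . . . O . . . R . .
#     . E . L . _ . O . L .
#     . . L . . . W . . . D
#
# Reading the rails top to bottom gives encrypt("HELLO WORLD", 3) == "HOREL OLLWD".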
| 77 |
from __future__ import annotations
class Matrix:
'''simple docstring'''
def __init__( self , snake_case ):
lowercase = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(snake_case ) != 0:
lowercase = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(snake_case ) != cols:
raise error
for value in row:
if not isinstance(snake_case , (int, float) ):
raise error
lowercase = rows
else:
lowercase = []
def SCREAMING_SNAKE_CASE__ ( self ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.rows )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.rows[0] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return (self.num_rows, self.num_columns)
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.order[0] == self.order[1]
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def SCREAMING_SNAKE_CASE__ ( self ):
return bool(self.determinant() )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(snake_case ).determinant()
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
if (row + column) % 2 == 0:
return self.get_minor(snake_case , snake_case )
return -1 * self.get_minor(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
return Matrix(
[
[self.get_minor(snake_case , snake_case ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def SCREAMING_SNAKE_CASE__ ( self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self ):
return str(self.rows )
def __str__( self ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                '[' + '. '.join([str(value ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(snake_case , snake_case ):
raise type_error
for value in row:
if not isinstance(snake_case , (int, float) ):
raise type_error
if len(snake_case ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(snake_case )
else:
lowercase = self.rows[0:position] + [row] + self.rows[position:]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(snake_case , snake_case ):
raise type_error
for value in column:
if not isinstance(snake_case , (int, float) ):
raise type_error
if len(snake_case ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
lowercase = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
lowercase = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , snake_case ):
if not isinstance(snake_case , snake_case ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , snake_case ):
return not self == other
def __neg__( self ):
return self * -1
def __add__( self , snake_case ):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , snake_case ):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , snake_case ):
if isinstance(snake_case , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(snake_case , snake_case ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(snake_case , snake_case ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self , snake_case ):
if not isinstance(snake_case , snake_case ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
lowercase = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , snake_case , snake_case ):
return sum(row[i] * column[i] for i in range(len(snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
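# Quick usage sketch for the Matrix class above; method and property names follow
# the class's own internal usages. Note that scalar __mul__ truncates with int(),
# so inverse() is only exact when the determinant is +/-1:
#
#     m = Matrix([[1, 2], [3, 4]])
#     m.determinant()   # 1*4 - 2*3 == -2
#     m.order           # (2, 2)
#     (m + m).rows      # [[2, 4], [6, 8]]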
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float ) -> tuple:
'''simple docstring'''
UpperCAmelCase_ = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
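# Worked example; the helper is the classic electric_power solver (that name is
# an assumption here, since the declaration above is obfuscated), called with
# exactly one of the three quantities set to zero:
#
#     electric_power(voltage=0, current=2, power=5)  # result(name='voltage', value=2.5)
#     electric_power(voltage=2, current=2, power=0)  # result(name='power', value=4.0)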
| 78 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=8 ):
lowercase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
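# e.g. downscale_height_and_width(768, 768, scale_factor=8): 768 // 8**2 == 12
# with no remainder, so the returned latent-aligned size is (12 * 8, 12 * 8) == (96, 96).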
class A_ ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , ):
super().__init__()
self.register_modules(
unet=snake_case , scheduler=snake_case , movq=snake_case , )
lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
if latents is None:
lowercase = randn_tensor(snake_case , generator=snake_case , device=snake_case , dtype=snake_case )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase = latents.to(snake_case )
lowercase = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase = torch.device(F'''cuda:{gpu_id}''' )
lowercase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase , lowercase = cpu_offload_with_hook(snake_case , snake_case , prev_module_hook=snake_case )
# We'll offload the last model manually.
lowercase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE__ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
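# Hedged aside (illustration only, not pipeline code): the guidance step in
# the denoising loop above is the standard classifier-free guidance combine,
# noise = uncond + scale * (text - uncond). The tensors below are made up.
if __name__ == "__main__":
    import torch as _torch
    _uncond , _text = _torch.zeros(2 , 4 ) , _torch.ones(2 , 4 )
    assert _torch.allclose(_uncond + 4.0 * (_text - _uncond ) , 4.0 * _text )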
| 84 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
'''simple docstring'''
# Load configuration defined in the metadata file
with open(__lowerCamelCase ) as metadata_file:
UpperCAmelCase__ : Union[str, Any] = json.load(__lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = LukeConfig(use_entity_aware_attention=__lowerCamelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
UpperCAmelCase__ : Optional[int] = torch.load(__lowerCamelCase , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
UpperCAmelCase__ : List[str] = load_original_entity_vocab(__lowerCamelCase )
# add an entry for [MASK2]
UpperCAmelCase__ : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCAmelCase__ : Any = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCAmelCase__ : str = AddedToken("""<ent>""" , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )
UpperCAmelCase__ : int = AddedToken("""<ent2>""" , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , """tokenizer_config.json""" ) , """r""" ) as f:
UpperCAmelCase__ : str = json.load(__lowerCamelCase )
UpperCAmelCase__ : Optional[int] = """MLukeTokenizer"""
with open(os.path.join(__lowerCamelCase , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase__ : str = MLukeTokenizer.from_pretrained(__lowerCamelCase )
# Initialize the embeddings of the special tokens
UpperCAmelCase__ : List[str] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
UpperCAmelCase__ : int = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
UpperCAmelCase__ : Dict = state_dict["""embeddings.word_embeddings.weight"""]
UpperCAmelCase__ : Tuple = word_emb[ent_init_index].unsqueeze(0 )
UpperCAmelCase__ : str = word_emb[enta_init_index].unsqueeze(0 )
UpperCAmelCase__ : List[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCAmelCase__ : Dict = state_dict[bias_name]
UpperCAmelCase__ : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCAmelCase__ : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCAmelCase__ : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCAmelCase__ : Tuple = F"encoder.layer.{layer_index}.attention.self."
UpperCAmelCase__ : Optional[int] = state_dict[prefix + matrix_name]
UpperCAmelCase__ : List[str] = state_dict[prefix + matrix_name]
UpperCAmelCase__ : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCAmelCase__ : Any = state_dict["""entity_embeddings.entity_embeddings.weight"""]
UpperCAmelCase__ : List[str] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCAmelCase__ : Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCAmelCase__ : List[str] = state_dict["""entity_predictions.bias"""]
UpperCAmelCase__ : int = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCAmelCase__ : str = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCAmelCase__ : int = LukeForMaskedLM(config=__lowerCamelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
UpperCAmelCase__ : str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
UpperCAmelCase__ : str = state_dict[key]
else:
UpperCAmelCase__ : List[Any] = state_dict[key]
UpperCAmelCase__ , UpperCAmelCase__ : Any = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
if set(__lowerCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(__lowerCamelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCAmelCase__ : Tuple = MLukeTokenizer.from_pretrained(__lowerCamelCase , task="""entity_classification""" )
UpperCAmelCase__ : Optional[Any] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
UpperCAmelCase__ : Union[str, Any] = (0, 9)
UpperCAmelCase__ : Union[str, Any] = tokenizer(__lowerCamelCase , entity_spans=[span] , return_tensors="""pt""" )
UpperCAmelCase__ : Optional[int] = model(**__lowerCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCAmelCase__ : Union[str, Any] = torch.Size((1, 33, 768) )
UpperCAmelCase__ : Tuple = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCAmelCase__ : Optional[int] = torch.Size((1, 1, 768) )
UpperCAmelCase__ : List[str] = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCAmelCase__ : Tuple = MLukeTokenizer.from_pretrained(__lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = """Tokyo is the capital of <mask>."""
UpperCAmelCase__ : int = (24, 30)
UpperCAmelCase__ : List[Any] = tokenizer(__lowerCamelCase , entity_spans=[span] , return_tensors="""pt""" )
UpperCAmelCase__ : Optional[int] = model(**__lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = encoding["""input_ids"""][0].tolist()
UpperCAmelCase__ : Optional[int] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
UpperCAmelCase__ : Dict = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
UpperCAmelCase__ : Union[str, Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__lowerCamelCase ) )
model.save_pretrained(__lowerCamelCase )
def load_original_entity_vocab( entity_vocab_path ):
    '''simple docstring'''
    SPECIAL_TOKENS = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["""id"""]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            entity_name = F"{language}:{entity_name}"
            new_mapping[entity_name] = entity_id
    return new_mapping
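# Hedged note on the loader above: it expects a JSON-lines file where each
# line looks like {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]};
# special tokens keep their bare name, every other entity is keyed as
# "language:entity_name", so that example line maps both "en:Japan" and
# "ja:日本" to id 3.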
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 79 |
def decimal_isolate( number , digit_amount ):
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
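# Hedged aside: digit_amount == 0 returns the raw fractional part, while a
# positive digit_amount passes it through round(), so the two branches can
# differ by ordinary binary floating-point artifacts.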
| 84 | 0 |
def base16_encode( data ):
    '''simple docstring'''
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data ):
    '''simple docstring'''
    if (len(data ) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("""0123456789ABCDEF""" ):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
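# Hedged round-trip check (added for illustration; mirrors RFC 3548 base16):
assert base16_encode(b"Hello" ) == "48656C6C6F"
assert base16_decode("48656C6C6F" ) == b"Hello"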
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
from __future__ import annotations
def is_palindrome( n ):
    n = str(n )
    return n == n[::-1]
def solution( limit = 100_0000 ):
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
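# Hedged aside: the first double-base palindromes summed above are
# 1, 3, 5, 7, 9, 33, 99, 313, 585, ... (585 is 1001001001 in binary).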
| 84 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    if split_mlp_wi:
        wi_a = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_b = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch( variables , * , num_layers , is_encoder_only , scalable_attention = False ):
__snake_case : Union[str, Any] = traverse_util.flatten_dict(variables["target"] )
__snake_case : str = {"/".join(__lowerCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__snake_case : Any = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , __lowerCamelCase )
__snake_case : Tuple = collections.OrderedDict()
# Shared embeddings.
__snake_case : Optional[int] = old["token_embedder/embedding"]
# Encoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
__snake_case : Union[str, Any] = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_attention_layer_norm" )
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "attention" )
__snake_case : List[str] = layer_norm
__snake_case : Union[str, Any] = k.T
__snake_case : List[str] = o.T
__snake_case : List[str] = q.T
__snake_case : Union[str, Any] = v.T
# Block i, layer 1 (MLP).
__snake_case : str = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , "pre_mlp_layer_norm" )
__snake_case , __snake_case : Any = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "encoder" , __lowerCamelCase )
__snake_case : str = layer_norm
if split_mlp_wi:
__snake_case : Optional[int] = wi[0].T
__snake_case : List[Any] = wi[1].T
else:
__snake_case : Any = wi.T
__snake_case : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__snake_case : List[Any] = tax_relpos_bias_lookup(
__lowerCamelCase , __lowerCamelCase , "encoder" ).T
__snake_case : int = old["encoder/encoder_norm/scale"]
if not scalable_attention:
__snake_case : Optional[int] = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "encoder" ).T
__snake_case : Union[str, Any] = tax_relpos_bias_lookup(
__lowerCamelCase , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(__lowerCamelCase ):
# Block i, layer 0 (Self Attention).
__snake_case : Tuple = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_self_attention_layer_norm" )
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "self_attention" )
__snake_case : int = layer_norm
__snake_case : Tuple = k.T
__snake_case : List[Any] = o.T
__snake_case : str = q.T
__snake_case : Optional[Any] = v.T
# Block i, layer 1 (Cross Attention).
__snake_case : Optional[Any] = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_cross_attention_layer_norm" )
__snake_case , __snake_case , __snake_case , __snake_case : List[Any] = tax_attention_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "encoder_decoder_attention" )
__snake_case : Optional[int] = layer_norm
__snake_case : int = k.T
__snake_case : Optional[Any] = o.T
__snake_case : Dict = q.T
__snake_case : List[Any] = v.T
# Block i, layer 2 (MLP).
__snake_case : Dict = tax_layer_norm_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , "pre_mlp_layer_norm" )
__snake_case , __snake_case : Dict = tax_mlp_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" , __lowerCamelCase )
__snake_case : Dict = layer_norm
if split_mlp_wi:
__snake_case : List[Any] = wi[0].T
__snake_case : int = wi[1].T
else:
__snake_case : Dict = wi.T
__snake_case : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__snake_case : Any = tax_relpos_bias_lookup(__lowerCamelCase , __lowerCamelCase , "decoder" ).T
__snake_case : Dict = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__snake_case : List[Any] = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict( converted_params , is_encoder_only ):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ):
    config = MTaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
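# Hedged aside (made-up shapes, for illustration): T5X stores dense kernels
# as (in_features, out_features) while torch.nn.Linear stores
# (out_features, in_features), which is why the conversion above transposes
# with `.T` throughout.
if __name__ == "__main__":
    _jax_kernel = np.zeros((512, 2048))  # (in, out), as found in the checkpoint
    assert _jax_kernel.T.shape == (2048, 512)  # (out, in), as nn.Linear expects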
| 81 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
@property
    def hidden_size( self ):
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
    def atol_for_validation( self ):
return 1E-5
@property
    def default_onnx_opset( self ):
return 12
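# Hedged note: the attribute_map in the config class above means
# `config.num_attention_heads` resolves to `config.encoder_attention_heads`
# (default 8) and `config.hidden_size` to `config.d_model` (default 256), so
# generic transformers utilities can read this config without DETR-specific
# attribute names.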
| 84 | 0 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result( ):
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list )
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost] )
        adjacency[node_b].append([node_a, cost] )
    result = mst(adjacency )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
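# Hedged runner for the check above (added for illustration):
if __name__ == "__main__":
    test_prim_successful_result()
    print("Prim's MST matched the expected edge set" )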
| 82 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
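# Hedged aside: _LazyModule defers the sentencepiece-backed import until the
# attribute is first touched, so `from transformers import MLukeTokenizer`
# is what actually loads tokenization_mluke, not importing the package.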
| 84 | 0 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset( IterableDataset ):
    def __init__( self , p_stop : float=0.01 , max_length : int=1_0_0_0 ):
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class __snake_case ( unittest.TestCase):
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : str = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Tuple = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : str = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : int = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : List[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Tuple = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : str = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
_lowerCamelCase : Any = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=False ):
"""simple docstring"""
random.seed(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
IterableDatasetShard(
__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , )
for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Union[str, Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__lowerCAmelCase )
iterable_dataset_lists.append(list(__lowerCAmelCase ) )
_lowerCamelCase : Dict = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowerCamelCase : Union[str, Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 )
_lowerCamelCase : List[str] = []
for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__lowerCAmelCase ) < len(__lowerCAmelCase ):
reference += reference
self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 4_2
_lowerCamelCase : Any = RandomIterableDataset()
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
# Edge case with a very small dataset
_lowerCamelCase : Optional[Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = SkipBatchSampler(__lowerCAmelCase , 2 )
self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = DataLoader(list(range(1_6 ) ) , batch_size=4 )
_lowerCamelCase : str = skip_first_batches(__lowerCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
Accelerator()
_lowerCamelCase : Any = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
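# Hedged summary of the sharding pattern asserted above: with 2 processes and
# batch size 3 over range(24), shard 0 receives [0, 1, 2], [6, 7, 8], ... and
# shard 1 receives [3, 4, 5], [9, 10, 11], ...; when the dataset does not
# divide evenly, even_batches=True cycles back to the first samples so both
# shards stay the same length, while even_batches=False lets them differ.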
| 83 |
def topological_sort( graph ):
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('Cycle exists' )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
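# Hedged check: for the adjacency list above, Kahn's algorithm prints
# [0, 1, 2, 3, 4, 5]: vertex 0 is the only zero-indegree vertex, removing it
# frees 1 and 2, then 3, then 4 and 5.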
| 84 | 0 |
def is_pentagonal( n : int ):
    '''simple docstring'''
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution( limit : int = 50_00 ):
    '''simple docstring'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
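# Hedged note: with the default limit of 5000 the search above returns
# 5482660, the accepted Project Euler 44 answer, as the difference of the
# first pentagonal pair found whose sum and difference are both pentagonal.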
| 85 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neo'''] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_gpt_neo'''] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
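# Hedged aside: the try/except OptionalDependencyNotAvailable blocks above
# make torch and flax soft dependencies: each symbol list only enters
# _import_structure when its backend imports cleanly, so the PyTorch and
# Flax classes are simply absent on installs without those backends while
# the config symbols always load.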
| 84 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 'pixel_values'
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Union[str, Any] = TimmBackboneConfig
    def __init__( self , config , **kwargs ):
        requires_backends(self , "timm" )
        super().__init__(config )
        self.config = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(UpperCAmelCase , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
A_ = getattr(UpperCAmelCase , "use_pretrained_backbone" , UpperCAmelCase )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
A_ = config.out_indices if getattr(UpperCAmelCase , "out_indices" , UpperCAmelCase ) is not None else (-1,)
A_ = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
A_ = self._backbone.return_layers
A_ = {layer["module"]: str(UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def __A ( cls : Optional[int] , UpperCAmelCase : List[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
A_ = kwargs.pop("config" , TimmBackboneConfig() )
A_ = kwargs.pop("use_timm_backbone" , UpperCAmelCase )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
A_ = kwargs.pop("num_channels" , config.num_channels )
A_ = kwargs.pop("features_only" , config.features_only )
A_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
A_ = kwargs.pop("out_indices" , config.out_indices )
A_ = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : Optional[int] ):
pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
A_ = return_dict if return_dict is not None else self.config.use_return_dict
A_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
A_ = self._all_layers
A_ = self._backbone(UpperCAmelCase , **UpperCAmelCase )
A_ = self._return_layers
A_ = tuple(hidden_states[i] for i in self.out_indices )
else:
A_ = self._backbone(UpperCAmelCase , **UpperCAmelCase )
A_ = None
A_ = tuple(UpperCAmelCase )
A_ = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
A_ = (feature_maps,)
if output_hidden_states:
A_ = output + (hidden_states,)
return output
        return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
| 86 |
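# Hedged usage sketch for the wrapper in the previous block (exposed upstream
# as TimmBackbone; the backbone name is illustrative):
#
#     config = TimmBackboneConfig(backbone="resnet50", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(pixel_values).feature_maps  # last stage only by default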
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput( BaseOutput ):
    '''simple docstring'''
    down_block_res_samples : jnp.ndarray
    mid_block_res_sample : jnp.ndarray
class FlaxControlNetConditioningEmbedding( nn.Module ):
    '''simple docstring'''
    conditioning_embedding_channels : int
    block_out_channels : Tuple[int] = (16, 32, 96, 256)
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv_a = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv_a )
            conv_b = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv_b )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case ):
lowercase = self.conv_in(snake_case )
lowercase = nn.silu(snake_case )
for block in self.blocks:
lowercase = block(snake_case )
lowercase = nn.silu(snake_case )
lowercase = self.conv_out(snake_case )
return embedding
@flax_register_to_config
class A_ ( nn.Module , FlaxModelMixin , ConfigMixin ):
'''simple docstring'''
_UpperCamelCase : int = 32
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1280
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.float32
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : str = "rgb"
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(snake_case , dtype=jnp.float32 )
lowercase = jnp.ones((1,) , dtype=jnp.int32 )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(snake_case , dtype=jnp.float32 )
lowercase , lowercase = jax.random.split(snake_case )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.block_out_channels
lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase = self.num_attention_heads or self.attention_head_dim
# input
lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase = self.only_cross_attention
if isinstance(snake_case , snake_case ):
lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case , snake_case ):
lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase = []
lowercase = []
lowercase = block_out_channels[0]
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase = output_channel
lowercase = block_out_channels[i]
lowercase = i == len(snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase = FlaxCrossAttnDownBlock2D(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase = FlaxDownBlock2D(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case )
for _ in range(self.layers_per_block ):
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
if not is_final_block:
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
lowercase = down_blocks
lowercase = controlnet_down_blocks
# mid
lowercase = block_out_channels[-1]
lowercase = FlaxUNetMidBlock2DCrossAttn(
in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase = jnp.flip(snake_case , axis=1 )
# 1. time
if not isinstance(snake_case , jnp.ndarray ):
lowercase = jnp.array([timesteps] , dtype=jnp.int32 )
elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase = timesteps.astype(dtype=jnp.float32 )
lowercase = jnp.expand_dims(snake_case , 0 )
lowercase = self.time_proj(snake_case )
lowercase = self.time_embedding(snake_case )
# 2. pre-process
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.conv_in(snake_case )
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.controlnet_cond_embedding(snake_case )
sample += controlnet_cond
# 3. down
lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case , snake_case ):
lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
else:
lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
# 5. controlnet blocks
lowercase = ()
for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
lowercase = controlnet_block(snake_case )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase = controlnet_down_block_res_samples
lowercase = self.controlnet_mid_block(snake_case )
# 6. scaling
lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
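# --- Editor's sketch (not part of the original file) ---
# Downstream, each ControlNet down-block residual produced above is scaled
# by `conditioning_scale` (step 6) and added onto the matching UNet skip
# connection. A minimal, self-contained version of that consumption step,
# assuming only that jax is installed; all names below are hypothetical.
import jax.numpy as jnp

def apply_controlnet_residuals(unet_skips, controlnet_residuals, conditioning_scale=1.0):
    # pair blocks positionally: skip + scale * residual
    return tuple(s + conditioning_scale * r for s, r in zip(unet_skips, controlnet_residuals))

_skips = (jnp.ones((1, 8, 8, 4)), jnp.ones((1, 4, 4, 8)))
_res = (jnp.zeros((1, 8, 8, 4)), jnp.ones((1, 4, 4, 8)))
assert apply_controlnet_residuals(_skips, _res, 0.5)[1][0, 0, 0, 0] == 1.5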
| 84 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_lowerCamelCase : Union[str, Any] = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class UpperCamelCase_ ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : ArgumentParser) ->Tuple:
'''simple docstring'''
A__ = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''Model\'s type.''')
train_parser.add_argument(
'''--tf_checkpoint''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''TensorFlow checkpoint path or folder.''')
train_parser.add_argument(
'''--pytorch_dump_output''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''Path to the PyTorch saved model output.''')
train_parser.add_argument('''--config''' , type=UpperCAmelCase__ , default='''''' , help='''Configuration file path or folder.''')
train_parser.add_argument(
'''--finetuning_task_name''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=UpperCAmelCase__)
def __init__( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , *UpperCAmelCase__ : Any , ) ->Optional[int]:
'''simple docstring'''
A__ = logging.get_logger('''transformers-cli/converting''')
self._logger.info(f"""Loading model {model_type}""")
A__ = model_type
A__ = tf_checkpoint
A__ = pytorch_dump_output
A__ = config
A__ = finetuning_task_name
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase__)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase__)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase__)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCAmelCase__)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase__)
if "ckpt" in self._tf_checkpoint.lower():
A__ = self._tf_checkpoint
A__ = ''''''
else:
A__ = self._tf_checkpoint
A__ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
UpperCAmelCase__ , self._config , self._pytorch_dump_output , UpperCAmelCase__)
elif self._model_type == "gpt2":
try:
from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase__)
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase__)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
'''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]''')
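# --- Editor's sketch (not part of the original file) ---
# The dispatch pattern used by this CLI command: each command registers a
# sub-parser and stores a callable in `func`, so the entry point can simply
# call `args.func(args)`. Pure standard library; names are illustrative.
from argparse import ArgumentParser

def register_convert_command(parser: ArgumentParser) -> None:
    sub = parser.add_subparsers()
    convert = sub.add_parser("convert", help="Convert a TF checkpoint to PyTorch.")
    convert.add_argument("--model_type", required=True)
    convert.set_defaults(func=lambda args: print(f"converting {args.model_type}"))

_parser = ArgumentParser()
register_convert_command(_parser)
_args = _parser.parse_args(["convert", "--model_type", "bert"])
_args.func(_args)  # prints: converting bert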
| 87 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase = '''true'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=16 ):
set_seed(42 )
lowercase = RegressionModel()
lowercase = deepcopy(__SCREAMING_SNAKE_CASE )
lowercase = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
lowercase = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
model.to(accelerator.device )
lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return model, ddp_model, dataloader
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowercase = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
lowercase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
with accelerator.main_process_first():
lowercase = dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowercase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__SCREAMING_SNAKE_CASE ):
if use_longest:
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
return tokenizer.pad(__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=16 )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = Accelerator(dispatch_batches=__SCREAMING_SNAKE_CASE , split_batches=__SCREAMING_SNAKE_CASE )
lowercase = get_dataloader(__SCREAMING_SNAKE_CASE , not dispatch_batches )
lowercase = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=__SCREAMING_SNAKE_CASE )
lowercase , lowercase = accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = []
for batch in dataloader:
lowercase , lowercase = batch.values()
with torch.no_grad():
lowercase = model(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowercase , lowercase = [], []
for logit, targ in logits_and_targets:
logits.append(__SCREAMING_SNAKE_CASE )
targs.append(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = torch.cat(__SCREAMING_SNAKE_CASE ), torch.cat(__SCREAMING_SNAKE_CASE )
return logits, targs
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=82 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=16 ):
lowercase , lowercase , lowercase = get_basic_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase , lowercase = generate_predictions(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert (
len(__SCREAMING_SNAKE_CASE ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__SCREAMING_SNAKE_CASE )}'''
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False ):
lowercase = evaluate.load('glue' , 'mrpc' )
lowercase , lowercase = get_mrpc_setup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# First do baseline
lowercase , lowercase , lowercase = setup['no']
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
for batch in dataloader:
batch.to(__SCREAMING_SNAKE_CASE )
with torch.inference_mode():
lowercase = model(**__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=batch['labels'] )
lowercase = metric.compute()
# Then do distributed
lowercase , lowercase , lowercase = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowercase = model(**__SCREAMING_SNAKE_CASE )
lowercase = outputs.logits.argmax(dim=-1 )
lowercase = batch['labels']
lowercase , lowercase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE )
lowercase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def UpperCAmelCase_ ( ):
lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowercase = Accelerator(split_batches=__SCREAMING_SNAKE_CASE , dispatch_batches=__SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__SCREAMING_SNAKE_CASE , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowercase = Accelerator()
test_torch_metrics(__SCREAMING_SNAKE_CASE , 512 )
accelerator.state._reset_state()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
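# --- Editor's sketch (not part of the original file) ---
# What `gather_for_metrics` guards against, in framework-free form: with
# uneven shards the last batch is padded by repeating samples, so the
# gathered predictions must be truncated back to the true dataset length
# before metrics are computed. (Simplified: real gathering interleaves
# per-batch rather than concatenating whole shards.)
def gather_for_metrics_sketch(per_process_preds, num_samples):
    gathered = [p for shard in per_process_preds for p in shard]
    return gathered[:num_samples]  # drop the duplicated padding samples

_shards = [[0, 1, 2], [3, 4, 4]]  # second process repeated sample 4 as padding
assert gather_for_metrics_sketch(_shards, num_samples=5) == [0, 1, 2, 3, 4]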
| 84 | 0 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCAmelCase = get_logger()
UpperCAmelCase = None
class lowercase__ ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE) -> int:
super().__init__(features=SCREAMING_SNAKE_CASE)
import jax
from jaxlib.xla_client import Device
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
raise ValueError(
F'Expected {device} to be a `str` not {type(SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` '
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""")
_lowerCamelCase : str = device if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) else str(jax.devices()[0])
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCamelCase : Optional[int] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
F'device: {str(jax.devices()[0])}.')
_lowerCamelCase : str = str(jax.devices()[0])
_lowerCamelCase : Tuple = jnp_array_kwargs
@staticmethod
def UpperCamelCase_ ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(SCREAMING_SNAKE_CASE): device for device in jax.devices()}
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[Any]:
import jax
import jax.numpy as jnp
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
return jnp.stack(SCREAMING_SNAKE_CASE , axis=0)
return column
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[Any]:
import jax
import jax.numpy as jnp
if isinstance(SCREAMING_SNAKE_CASE , (str, bytes, type(SCREAMING_SNAKE_CASE))):
return value
elif isinstance(SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
_lowerCamelCase : List[str] = {}
if isinstance(SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
_lowerCamelCase : int = {"""dtype""": jnp.int64}
else:
_lowerCamelCase : Dict = {"""dtype""": jnp.int32}
elif isinstance(SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
_lowerCamelCase : Any = {"""dtype""": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image):
_lowerCamelCase : List[Any] = np.asarray(SCREAMING_SNAKE_CASE)
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCamelCase : List[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs})
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Any:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(SCREAMING_SNAKE_CASE , """__array__""") and not isinstance(SCREAMING_SNAKE_CASE , jax.Array):
_lowerCamelCase : str = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE , np.ndarray):
if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE) for substruct in data_struct])
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple)):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE) for substruct in data_struct])
return self._tensorize(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int:
return map_nested(self._recursive_tensorize , SCREAMING_SNAKE_CASE , map_list=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Mapping:
_lowerCamelCase : List[str] = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE)
return self.recursive_tensorize(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> "jax.Array":
_lowerCamelCase : str = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE , pa_table.column_names[0])
_lowerCamelCase : List[str] = self.recursive_tensorize(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = self._consolidate(SCREAMING_SNAKE_CASE)
return column
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Mapping:
_lowerCamelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = self.recursive_tensorize(SCREAMING_SNAKE_CASE)
for column_name in batch:
_lowerCamelCase : int = self._consolidate(batch[column_name])
return batch
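# --- Editor's sketch (not part of the original file) ---
# The dtype-selection rule used above, in isolation: integer NumPy input is
# mapped to jnp.int64 only when JAX runs in 64-bit mode, otherwise to
# jnp.int32. Assumes numpy and jax are installed.
import numpy as np
import jax
import jax.numpy as jnp

def default_int_dtype():
    # jax_enable_x64 is False unless the user enabled double precision
    return jnp.int64 if jax.config.jax_enable_x64 else jnp.int32

_arr = jnp.array(np.arange(3), dtype=default_int_dtype())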
| 88 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A_ ( ProcessorMixin ):
'''simple docstring'''
_UpperCamelCase : List[str] = ["""image_processor""", """tokenizer"""]
_UpperCamelCase : Any = """OwlViTImageProcessor"""
_UpperCamelCase : Dict = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , snake_case=None , snake_case=None , **snake_case ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case , snake_case )
def __call__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="max_length" , snake_case="np" , **snake_case ):
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one of text, query_images or images; all three cannot be None.' )
if text is not None:
if isinstance(snake_case , snake_case ) or (isinstance(snake_case , snake_case ) and not isinstance(text[0] , snake_case )):
lowercase = [self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )]
elif isinstance(snake_case , snake_case ) and isinstance(text[0] , snake_case ):
lowercase = []
# Maximum number of queries across batch
lowercase = max([len(snake_case ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(snake_case ) != max_num_queries:
lowercase = t + [' '] * (max_num_queries - len(snake_case ))
lowercase = self.tokenizer(snake_case , padding=snake_case , return_tensors=snake_case , **snake_case )
encodings.append(snake_case )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
lowercase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
lowercase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowercase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
lowercase = BatchEncoding()
lowercase = input_ids
lowercase = attention_mask
if query_images is not None:
lowercase = BatchEncoding()
lowercase = self.image_processor(
snake_case , return_tensors=snake_case , **snake_case ).pixel_values
lowercase = query_pixel_values
if images is not None:
lowercase = self.image_processor(snake_case , return_tensors=snake_case , **snake_case )
if text is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case ) , tensor_type=snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process_object_detection(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.image_processor.post_process_image_guided_detection(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.tokenizer.batch_decode(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , **snake_case ):
return self.tokenizer.decode(*snake_case , **snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case , )
return self.image_processor
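# --- Editor's sketch (not part of the original file) ---
# The query-padding step performed in __call__ above, in isolation: every
# sample is padded with blank queries up to the longest sample, so the
# tokenizer can return one rectangular batch.
def pad_text_queries(batch):
    max_num_queries = max(len(queries) for queries in batch)
    return [queries + [" "] * (max_num_queries - len(queries)) for queries in batch]

assert pad_text_queries([["cat"], ["dog", "remote"]]) == [["cat", " "], ["dog", "remote"]]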
| 84 | 0 |
SCREAMING_SNAKE_CASE : Dict = "Input must be a string of 8 numbers plus letter"
SCREAMING_SNAKE_CASE : List[str] = "TRWAGMYFPDXBNJZSQVHLCKE"
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : List[Any] = F'''Expected string as input, found {type(lowerCamelCase_ ).__name__}'''
raise TypeError(lowerCamelCase_ )
_lowercase : Optional[Any] = spanish_id.replace('-' , '' ).upper()
if len(lowerCamelCase_ ) != 9:
raise ValueError(lowerCamelCase_ )
try:
_lowercase : Dict = int(spanish_id_clean[0:8] )
_lowercase : str = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCamelCase_ ) from ex
if letter.isdigit():
raise ValueError(lowerCamelCase_ )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
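# --- Editor's worked example (not part of the original file) ---
# For the DNI number 12345678 the checksum is 12345678 % 23 == 14, and
# LOOKUP_LETTERS[14] == "Z", so "12345678Z" is a valid Spanish ID.
assert 12345678 % 23 == 14
assert "TRWAGMYFPDXBNJZSQVHLCKE"[14] == "Z"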
| 89 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCAmelCase = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCAmelCase = {
'''facebook/blenderbot_small-90M''': 512,
}
class A_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = BlenderbotSmallTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case="<|endoftext|>" , snake_case=False , snake_case=True , **snake_case , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case , merges=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , ) , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , **snake_case , )
lowercase = add_prefix_space
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=None ):
lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
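# --- Editor's sketch (not part of the original file) ---
# The special-token layout built by `build_inputs_with_special_tokens`
# above: a single sequence becomes [BOS] seq [EOS]; a pair appends
# [EOS] seq_b [EOS]. The token ids below are hypothetical.
BOS, EOS = 0, 2

def build_inputs(ids_a, ids_b=None):
    out = [BOS] + ids_a + [EOS]
    if ids_b is None:
        return out
    return out + [EOS] + ids_b + [EOS]

assert build_inputs([5, 6]) == [0, 5, 6, 2]
assert build_inputs([5], [7]) == [0, 5, 2, 2, 7, 2]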
| 84 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = KandinskyVaaPriorPipeline
lowercase__ : List[str] = ["prompt"]
lowercase__ : Union[str, Any] = ["prompt", "negative_prompt"]
lowercase__ : Optional[int] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase__ : str = False
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return 32
@property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return 32
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.time_input_dim
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return 1_00
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(lowerCamelCase_ )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
lowerCAmelCase__ = PriorTransformer(**lowerCamelCase_ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowerCAmelCase__ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
lowerCAmelCase__ = CLIPVisionModelWithProjection(lowerCamelCase_ )
return model
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=lowerCamelCase_ , do_normalize=lowerCamelCase_ , do_resize=lowerCamelCase_ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_24 , )
return image_processor
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.dummy_prior
lowerCAmelCase__ = self.dummy_image_encoder
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = self.dummy_tokenizer
lowerCAmelCase__ = self.dummy_image_processor
lowerCAmelCase__ = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=lowerCamelCase_ , clip_sample_range=10.0 , )
lowerCAmelCase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> int:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**lowerCamelCase_ )
lowerCAmelCase__ = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCAmelCase__ = output.image_embeds
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
lowerCAmelCase__ = image[0, -10:]
lowerCAmelCase__ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
lowerCAmelCase__ = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = torch_device == '''cpu'''
lowerCAmelCase__ = True
lowerCAmelCase__ = False
self._test_inference_batch_single_identical(
test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , test_mean_pixel_difference=lowerCamelCase_ , )
@skip_mps
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = torch_device == '''cpu'''
lowerCAmelCase__ = False
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCamelCase_ , test_mean_pixel_difference=lowerCamelCase_ , )
| 90 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
lowercase = model(snake_case , token_type_ids=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = OpenAIGPTDoubleHeadsModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , *snake_case ):
lowercase = self.num_labels
lowercase = OpenAIGPTForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.prepare_config_and_inputs()
(
lowercase,
lowercase,
lowercase,
lowercase,
lowercase,
lowercase,
lowercase,
) = config_and_inputs
lowercase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_UpperCamelCase : Tuple = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_UpperCamelCase : str = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case=False ):
lowercase = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=snake_case , )
lowercase = inputs_dict['labels']
lowercase = inputs_dict['labels']
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=snake_case , )
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = OpenAIGPTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(snake_case )
lowercase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=snake_case ) # the president is
lowercase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowercase = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].tolist() , snake_case )
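# --- Editor's sketch (not part of the original file) ---
# A minimal reimplementation of the `ids_tensor` helper the tester relies
# on (not the library's actual code): random token ids of a given shape
# drawn from [0, vocab_size). Assumes torch is installed.
import torch

def ids_tensor_sketch(shape, vocab_size):
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)

_batch = ids_tensor_sketch([2, 7], vocab_size=99)
assert _batch.shape == (2, 7) and _batch.max().item() < 99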
| 84 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _snake_case ( ):
A = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
return image
def _snake_case ( snake_case__ : Dict ):
A = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _snake_case ( snake_case__ : Dict , snake_case__ : int , snake_case__ : str ):
A = dct.pop(snake_case__ )
A = val
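# --- Editor's sketch (not part of the original file) ---
# The rename step above in isolation: pop the tensor out under its old key
# and reinsert it under the new key, leaving other entries untouched.
def rename_key_sketch(state_dict, old, new):
    state_dict[new] = state_dict.pop(old)

_sd = {"visual_encoder.cls_token": 1, "other": 2}
rename_key_sketch(_sd, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
assert "vision_model.embeddings.class_embedding" in _sd and "other" in _sd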
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
A = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
A = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
A = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) )
A = qkv_bias
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : int ):
A = 364 if 'coco' in model_name else 224
A = Blip2VisionConfig(image_size=snake_case__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
A = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=snake_case__ ).to_dict()
elif "opt-6.7b" in model_name:
A = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=snake_case__ ).to_dict()
elif "t5-xl" in model_name:
A = T5Config.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
A = T5Config.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
A = Blip2Config(vision_config=snake_case__ , text_config=snake_case__ )
return config, image_size
@torch.no_grad()
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any]=None , snake_case__ : List[Any]=False ):
A = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
A = tokenizer('\n' , add_special_tokens=snake_case__ ).input_ids[0]
A , A = get_blip2_config(snake_case__ , eos_token_id=snake_case__ )
A = Blip2ForConditionalGeneration(snake_case__ ).eval()
A = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
A , A = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
A = 'cuda' if torch.cuda.is_available() else 'cpu'
A , A , A = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print('Done!' )
# update state dict keys
A = original_model.state_dict()
A = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
A = state_dict.pop(snake_case__ )
if key.startswith('Qformer.bert' ):
A = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
A = key.replace('self' , 'attention' )
if "opt_proj" in key:
A = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
A = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
A = key.replace('opt' , 'language' )
if key.startswith('t5' ):
A = key.replace('t5' , 'language' )
A = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
A , A = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
A = load_demo_image()
A = vis_processors['eval'](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
A = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(snake_case__ )
# create processor
A = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=snake_case__ , image_std=snake_case__ )
A = Blip2Processor(image_processor=snake_case__ , tokenizer=snake_case__ )
A = processor(images=snake_case__ , return_tensors='pt' ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
A = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
A = hf_model(snake_case__ , snake_case__ ).logits
else:
A = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
A = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
A = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
A = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
A = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=snake_case__ )
else:
# cast to same type
A = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
A = ''
A = tokenizer(snake_case__ , return_tensors='pt' ).input_ids.to(snake_case__ )
A = original_model.generate({'image': original_pixel_values} )
A = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , snake_case__ )
A = input_ids.shape[1]
A = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
A = [text.strip() for text in output_text]
print('HF generation:' , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
_lowercase = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the BLIP-2 checkpoint to convert.''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
_lowercase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 91 |
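# --- Added illustration (not part of the conversion script above) ---
# A minimal, self-contained sketch of the state-dict key remapping pattern the
# converter uses: walk the checkpoint, rewrite matching substrings in each key,
# and re-insert the tensor under the new name. `remap_state_dict` and the demo
# names are illustrative, not part of the transformers API.
import torch


def remap_state_dict(state_dict, rules):
    remapped = {}
    for key, value in state_dict.items():
        for old, new in rules:
            if old in key:
                key = key.replace(old, new)
        remapped[key] = value
    return remapped


demo_rules = [("Qformer.bert", "qformer"), ("opt_proj", "language_projection")]
demo_sd = {"Qformer.bert.layer.0.weight": torch.zeros(2, 2)}
print(sorted(remap_state_dict(demo_sd, demo_rules)))  # ['qformer.layer.0.weight']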
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vit_msn'''] = [
        '''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMSNModel''',
        '''ViTMSNForImageClassification''',
        '''ViTMSNPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
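# --- Added illustration (not part of the module above) ---
# A rough, hand-rolled sketch of the idea behind transformers' _LazyModule:
# attribute access triggers the submodule import on first use. This is a
# simplified stand-in under stated assumptions, not the actual _LazyModule.
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # import the owning submodule lazily, relative to this package
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)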
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_gpt_sw3"""] = ["""GPTSw3Tokenizer"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
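# --- Added illustration (not part of the module above) ---
# The is_*_available() guards used in inits like the one above boil down to a
# find_spec probe. A minimal stand-in (not transformers' actual helper):
import importlib.util


def is_sentencepiece_available():
    return importlib.util.find_spec("sentencepiece") is not None


print(is_sentencepiece_available())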
import math
def prime_sieve(n):
    """Return all primes below n via an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit=999_966_663_333):
    """Project Euler 234: sum of all semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 84 | 0 |
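# --- Added cross-check (not part of the solution above) ---
# A brute-force sanity check of the semidivisible definition from Project
# Euler 234: exactly one of lps(n) (largest prime <= sqrt(n)) and ups(n)
# (smallest prime >= sqrt(n)) divides n. The problem statement gives 34825
# as the sum of all semidivisible numbers up to 1000.
import math


def _is_prime(k):
    return k >= 2 and all(k % d for d in range(2, math.isqrt(k) + 1))


def brute_force(limit):
    total = 0
    for n in range(4, limit + 1):
        root = math.sqrt(n)
        lps = next(p for p in range(int(root), 1, -1) if _is_prime(p))
        ups = next(p for p in range(math.ceil(root), 2 * n) if _is_prime(p))
        if (n % lps == 0) != (n % ups == 0):
            total += n
    return total


print(brute_force(1_000))  # 34825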
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Denoising diffusion probabilistic models (DDPM) scheduler, JAX/Flax version."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=1_000,
        beta_start=0.0001,
        beta_end=0.02,
        beta_schedule="linear",
        trained_betas=None,
        variance_type="fixed_small",
        clip_sample=True,
        prediction_type="epsilon",
        dtype=jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common=None):
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(self, state, sample, timestep=None):
        return sample

    def set_timesteps(self, state, num_inference_steps, shape=()):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state,
        model_output,
        timestep,
        sample,
        key=None,
        return_dict=True,
    ):
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state, sample, noise, timesteps):
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
| 93 |
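# --- Added illustration (not part of the scheduler above) ---
# Numeric sketch of the epsilon -> x0 conversion from step() above
# (formula (15) of https://arxiv.org/pdf/2006.11239.pdf), with toy values:
import jax.numpy as jnp

alpha_prod_t = jnp.array(0.9)
beta_prod_t = 1 - alpha_prod_t
sample = jnp.ones((1, 4))              # x_t
model_output = 0.1 * jnp.ones((1, 4))  # predicted noise epsilon
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
print(pred_original_sample)  # approx 1.0208 everywhere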
import collections
import os
import re
from pathlib import Path
UpperCAmelCase = '''src/transformers'''
# Matches is_xxx_available()
UpperCAmelCase = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
UpperCAmelCase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
UpperCAmelCase = re.compile(R'''^\s*try:''')
# Catches a line with else:
UpperCAmelCase = re.compile(R'''^\s*else:''')
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
if _re_test_backend.search(__SCREAMING_SNAKE_CASE ) is None:
return None
lowercase = [b[0] for b in _re_backend.findall(__SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase = f.readlines()
lowercase = 0
while line_index < len(__SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ):
lowercase = _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ).groups()[0]
lowercase = re.findall(r'\[([^\]]+)\]' , __SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
lowercase = _re_import_struct_key_value.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
lowercase = lines[line_index]
if _re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ) is not None:
lowercase = _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(__SCREAMING_SNAKE_CASE ) is not None:
lowercase = _re_between_brackets.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(__SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(__SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase = []
while (
line_index < len(__SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
lowercase = lines[line_index]
lowercase = _re_import.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(__SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
lowercase = lines[line_index]
lowercase = _re_import.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
def find_duplicates(__SCREAMING_SNAKE_CASE ):
return [k for k, v in collections.Counter(__SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase = []
for key in import_dict_objects.keys():
lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase = 'base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def UpperCAmelCase_ ( ):
lowercase = []
for root, _, files in os.walk(__SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' )
lowercase = parse_init(__SCREAMING_SNAKE_CASE )
if objects is not None:
lowercase = analyze_results(*__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(__SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase_ ( ):
lowercase = []
for path, directories, files in os.walk(__SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(__SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
lowercase = str((Path(__SCREAMING_SNAKE_CASE ) / folder).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(__SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
lowercase = str((Path(__SCREAMING_SNAKE_CASE ) / fname).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(__SCREAMING_SNAKE_CASE )
return submodules
UpperCAmelCase = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def UpperCAmelCase_ ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
lowercase = direct_transformers_import(__SCREAMING_SNAKE_CASE )
lowercase = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
with open(os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' ) as f:
lowercase = f.read()
import_structure_keys.update(set(re.findall(r'import_structure\[\"([^\"]*)\"\]' , __SCREAMING_SNAKE_CASE ) ) )
lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase = '\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 84 | 0 |
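# --- Added illustration (not part of the checker above) ---
# Small demo of the backend-detection idea used by find_backend: pull every
# is_xxx_available() name off a line and join the sorted backends with "_and_".
import re

_re_demo_backend = re.compile(r"is\_([a-z_]*)_available")
line = "    if not is_torch_available() or not is_vision_available():"
backends = sorted(_re_demo_backend.findall(line))
print("_and_".join(backends))  # torch_and_vision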
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Project Euler 71: find the numerator of the reduced proper fraction
    immediately to the left of numerator/denominator when all fractions with
    denominators up to the limit are listed in ascending order."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 94 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f'{self.data}'


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
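    # Added usage demo of the stack on top of the doctest run above.
    stack = Stack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3 and stack.peek() == 2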
| 84 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=13_37,
        post_processing_size=4_42,
        dataset_size=12_34,
        size_in_bytes=13_37 + 4_42 + 12_34,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=13_37),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 95 |
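# --- Added illustration (not part of the tests above) ---
# The round-trip property these tests lean on, shown with plain dicts and
# PyYAML (already a dependency of the tests above):
import yaml

info = {"dataset_size": 42, "splits": [{"name": "train"}]}
assert yaml.safe_load(yaml.safe_dump(info)) == info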
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = LlamaModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = LlamaModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
lowercase = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = True
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCamelCase : List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : int = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : int = False
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LlamaModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'single_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'multi_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = ids_tensor([1, 10] , config.vocab_size )
lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase = LlamaModel(snake_case )
original_model.to(snake_case )
original_model.eval()
lowercase = original_model(snake_case ).last_hidden_state
lowercase = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase = {'type': scaling_type, 'factor': 10.0}
lowercase = LlamaModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
lowercase = scaled_model(snake_case ).last_hidden_state
lowercase = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowercase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
lowercase = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowercase = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowercase = 'Simply put, the theory of relativity states that '
lowercase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowercase = tokenizer.encode(snake_case , return_tensors='pt' )
lowercase = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=snake_case )
# greedy generation outputs
lowercase = model.generate(snake_case , max_new_tokens=64 , top_p=snake_case , temperature=1 , do_sample=snake_case )
lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case )
self.assertEqual(snake_case , snake_case )
| 84 | 0 |
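# --- Added illustration (not part of the tests above) ---
# Minimal sketch of the "linear" RoPE scaling exercised above: positions are
# divided by the scaling factor before the rotary angles are computed. Names
# and signature here are illustrative, not the transformers API.
import torch


def rope_angles(positions: torch.Tensor, dim: int = 8, base: float = 10000.0, factor: float = 1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    scaled = positions.float() / factor  # linear scaling stretches the usable context
    return torch.outer(scaled, inv_freq)


print(rope_angles(torch.arange(4), factor=10.0).shape)  # torch.Size([4, 4])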
"""simple docstring"""
import enum
import shutil
import sys
__lowerCamelCase , __lowerCamelCase = shutil.get_terminal_size()
__lowerCamelCase = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class __A ( enum.Enum ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
def a ( __UpperCAmelCase : str , __UpperCAmelCase : Any="" ) -> Any:
sys.stdout.write(str(__UpperCAmelCase ) + end )
sys.stdout.flush()
def a ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict="" ) -> Optional[Any]:
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , __UpperCAmelCase )
def a ( ) -> Union[str, Any]:
forceWrite("""\r""" )
def a ( __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Optional[Any]:
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def a ( ) -> List[str]:
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def a ( ) -> Optional[Any]:
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 96 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
'''simple docstring'''
_UpperCamelCase : Dict = """dummy_data"""
_UpperCamelCase : Optional[int] = """datasets"""
_UpperCamelCase : Tuple = False
def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
lowercase = 0
lowercase = dataset_name
lowercase = cache_dir
lowercase = use_local_dummy_data
lowercase = config
# download_callbacks take a single url as input
lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase = str(snake_case )
# to be downloaded
lowercase = None
lowercase = None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._dummy_file is None:
lowercase = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case , snake_case ):
return self.create_dummy_data_dict(snake_case , snake_case )
elif isinstance(snake_case , (list, tuple) ):
return self.create_dummy_data_list(snake_case , snake_case )
else:
return self.create_dummy_data_single(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
return path
def SCREAMING_SNAKE_CASE__ ( self ):
return {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case , snake_case ):
for single_url in single_urls:
download_callback(snake_case )
else:
lowercase = single_urls
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case , snake_case ):
lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
else:
lowercase = single_urls
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
lowercase = value
# make sure that values are unique
if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase = [data_url[0]] * len(snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(snake_case )
return dummy_data_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
def _iter_archive_members(snake_case ):
# this preserves the order of the members inside the ZIP archive
lowercase = Path(self.dummy_file ).parent
lowercase = path.relative_to(snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case )
lowercase = Path(snake_case )
lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not isinstance(snake_case , snake_case ):
lowercase = [paths]
for path in paths:
if os.path.isfile(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(snake_case ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(snake_case , snake_case )
| 84 | 0 |
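# --- Added illustration (not part of the manager above) ---
# Sketch of the URL -> local dummy filename mapping used above: the last path
# component is percent-encoded with quote_plus so query strings stay
# filesystem-safe.
import os
import urllib.parse
from pathlib import Path

url = "https://example.com/data/train.csv?raw=true"
local = os.path.join("dummy", urllib.parse.quote_plus(Path(url).name))
print(local)  # dummy/train.csv%3Fraw%3Dtrue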
from __future__ import annotations

COULOMBS_CONSTANT = 8.988E9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever one of the four quantities is given as 0."""
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''')
if __name__ == "__main__":
import doctest
doctest.testmod()
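    # Added example: two 1 C charges 1 m apart feel a force equal to
    # Coulomb's constant, in newtons.
    print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))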
| 97 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = OpenAIGPTTokenizer
_UpperCamelCase : List[Any] = OpenAIGPTTokenizerFast
_UpperCamelCase : int = True
_UpperCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowercase = dict(zip(snake_case , range(len(snake_case ) ) ) )
lowercase = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(snake_case ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase = 'lower'
lowercase = ['low', 'er</w>']
lowercase = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
lowercase = tokens + ['<unk>']
lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
    def test_padding( self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests: padding must fail because no pad token is defined
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
    def test_padding_different_model_input_name( self ):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class A_ ( A_ ):  # ftfy/spaCy variant of the tokenization test class above
'''simple docstring'''
pass
| 84 | 0 |
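For context on the padding test above, a standalone sketch (it downloads the real checkpoint, so network access is assumed): GPT-style tokenizers ship without a pad token, which is exactly why padding='max_length' must raise.
from transformers import OpenAIGPTTokenizerFast
tok = OpenAIGPTTokenizerFast.from_pretrained('openai-gpt')
try:
    tok.encode('This is a simple input', max_length=15, padding='max_length')
except ValueError as err:
    print(err)  # the tokenizer has no padding token, so padded encoding fails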
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main( ) -> None:
    """simple docstring"""
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )
for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 98 |
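The date arithmetic that drives both branches of the bot above can be exercised on its own (timestamps are made up):
from datetime import datetime, timedelta
updated_at = datetime.utcnow() - timedelta(days=10)
created_at = datetime.utcnow() - timedelta(days=40)
print((datetime.utcnow() - updated_at).days > 7)    # True -> inactive long enough
print((datetime.utcnow() - created_at).days >= 30)  # True -> old enough to act on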
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure ( config ):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption ( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish ( session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    '''simple docstring'''
    def check_output( self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 84 | 0 |
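To show how the registered IGNORE_RESULT flag is meant to be used, here is a hypothetical doctest (the function and values are made up); with CustomOutputChecker installed, the mismatching expected output is simply accepted:
def add(a, b):
    """
    >>> add(1, 1)  # doctest: +IGNORE_RESULT
    17
    """
    return a + b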
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
SCREAMING_SNAKE_CASE = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
FAIRSEQ_LANGUAGE_CODES = SCREAMING_SNAKE_CASE  # conventional name for the language-code list above
class __UpperCAmelCase ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else """eng_Latn"""
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ):
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch( self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , lang ):
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 99 |
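A usage sketch for the tokenizer above, via the upstream NllbTokenizer it mirrors (requires network access; the exact token split is illustrative):
from transformers import NllbTokenizer
tok = NllbTokenizer.from_pretrained('facebook/nllb-200-distilled-600M', src_lang='eng_Latn')
ids = tok('Hello world')['input_ids']
print(tok.convert_ids_to_tokens(ids))  # e.g. ['eng_Latn', '▁Hello', '▁world', '</s>']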
import torch
from torch import nn
class A_ ( nn.Module ):
'''simple docstring'''
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self , hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i, None] + tail_logprob_i  # broadcast head log-prob over the tail slice
                    out[:, start_idx:stop_idx] = logprob_i
            return out
| 84 | 0 |
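The cutoff bookkeeping in the constructor above can be checked in isolation (the cutoff values here are made up):
n_token, cutoffs = 10_000, [500, 2_000]
cutoff_ends = [0] + cutoffs + [n_token]
for i in range(len(cutoff_ends) - 1):
    print(f'cluster {i}: token ids {cutoff_ends[i]}..{cutoff_ends[i + 1] - 1}')
# cluster 0 is the frequent-token "head" shortlist; clusters 1+ are the projected tails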
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config( model_name ) -> List[Any]:
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
    else:
        raise ValueError('''Model name should include either resnet50 or resnet101''' )
    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )
    # set label attributes
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 2_5_0
    else:
        config.num_labels = 9_1
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys( config ) -> List[Any]:
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def read_in_q_k_v( state_dict , is_panoptic=False ):
    prefix = ''''''
    if is_panoptic:
        prefix = '''detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_5_6, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_5_6]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_5_6:5_1_2]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_5_6:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_5_6:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_5_6, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_5_6]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_5_6:5_1_2]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_5_6:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_5_6:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:2_5_6, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:2_5_6]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[2_5_6:5_1_2]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-2_5_6:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-2_5_6:]
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_detr_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    config , is_panoptic = get_detr_config(model_name )
    # load original model from torch hub
    model_name_to_original_name = {
        '''detr-resnet-50''': '''detr_resnet50''',
        '''detr-resnet-101''': '''detr_resnet101''',
    }
    logger.info(f'''Converting model {model_name}...''' )
    detr = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = '''detr.''' + src
        rename_key(state_dict , src , dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''detr''' )
                and not key.startswith('''class_labels_classifier''' )
                and not key.startswith('''bbox_predictor''' )
            ):
                val = state_dict.pop(key )
                state_dict['''detr.model''' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['''detr.''' + key] = val
            elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion on an image
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    processor = DetrImageProcessor(format=format )
    encoding = processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('''Uploading PyTorch model and image processor to the hub...''' )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 100 |
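The rename_key helper above is just a pop-and-reinsert on a dict; a minimal standalone check using one of the real key pairs from the list:
state_dict = {'backbone.0.body.conv1.weight': 'dummy-tensor'}
old, new = ('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight')
val = state_dict.pop(old)
state_dict[new] = val
print(state_dict)  # the value now lives under the HuggingFace-style key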
from __future__ import annotations
class A_ :
'''simple docstring'''
    def __init__( self , rows ):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
    def num_rows( self ):
return len(self.rows )
@property
    def num_columns( self ):
return len(self.rows[0] )
@property
    def order( self ):
return (self.num_rows, self.num_columns)
@property
    def is_square( self ):
return self.order[0] == self.order[1]
    def identity( self ):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self ):
        return bool(self.determinant() )
    def get_minor( self , row , column ):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self , row , column ):
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self ):
return str(self.rows )
def __str__( self ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                '[' + '. '.join([str(value ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
    def add_row( self , row , position = None ):
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column , position = None ):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other ):
        if not isinstance(other , Matrix ):
return NotImplemented
return self.rows == other.rows
    def __ne__( self , other ):
return not self == other
def __neg__( self ):
return self * -1
    def __add__( self , other ):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self , other ):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self , other ):
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )
    def __pow__( self , other ):
        if not isinstance(other , int ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls , row , column ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 | 0 |
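A short exercise of the Matrix class above:
m = Matrix([[1, 2], [3, 4]])
print(m.order)                # (2, 2)
print(m.determinant())        # -2
print(m * m.identity() == m)  # True: multiplying by the identity is a no-op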
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk( checkpoint_path ):
    mam_aaa = torch.load(checkpoint_path , map_location='cpu' )
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 101 |
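The make_linear_from_emb trick above can be seen on a toy embedding; the Linear's weight tensor is simply re-pointed at the embedding matrix, so the output projection is tied to it:
import torch
from torch import nn
emb = nn.Embedding(10, 4)          # vocab_size=10, emb_size=4
lin = nn.Linear(10, 4, bias=False)
lin.weight.data = emb.weight.data  # weight now has shape (10, 4)
logits = lin(torch.randn(2, 4))    # acts as a 4 -> 10 projection over the vocab
print(logits.shape)                # torch.Size([2, 10])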
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class A_ ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 84 | 0 |
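The downscale_height_and_width helper above rounds the latent grid up to the next multiple of the scale factor; two quick checks:
print(downscale_height_and_width(768, 768))  # (96, 96)   768 is a multiple of 8**2
print(downscale_height_and_width(769, 768))  # (104, 96)  769 is not, so it rounds up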
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , image_size=2_2_4 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , """image_mean""" ) )
        self.assertTrue(hasattr(image_processor , """image_std""" ) )
        self.assertTrue(hasattr(image_processor , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processor , """do_resize""" ) )
        self.assertTrue(hasattr(image_processor , """size""" ) )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
| 102 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, keeping `digit_amount` digits (0 keeps all)."""
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
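    # Expected output (modulo float representation): 0.53, 0.3, 0.34, 0.345, -0.789, 0, -0.1, -0.12, -0.123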
| 84 | 0 |
"""simple docstring"""
def xnor_gate(input_a: int, input_b: int) -> int:
    return 1 if input_a == input_b else 0
def test_xnor_gate() -> None:
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
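    # Output: 1, 0, 0, 1 (XNOR is high exactly when both inputs agree).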
| 103 |
from __future__ import annotations
def is_palindrome( n ):
    n = str(n )
    return n == n[::-1]
def solution( limit = 100_0000 ):
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
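# Example: 585 = 0b1001001001 is palindromic in base 10 and in base 2.
# For the default limit of 10**6 this is Project Euler problem 36; the sum is 872187.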
| 84 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
"""simple docstring"""
    def __init__( self , value ) -> None:
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum:
"""simple docstring"""
    def __init__( self , tree ) -> None:
        self.tree = tree
    def depth_first_search( self , node ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
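# Minimal usage sketch (hypothetical tree): the iterator yields the sum of all node values.
#   root = Node(10); root.left = Node(5); root.right = Node(-3)
#   sum(BinaryTreeNodeSum(root))  # -> 12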
| 104 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig( PretrainedConfig ):
'''simple docstring'''
_UpperCamelCase : List[Any] = """conditional_detr"""
_UpperCamelCase : Any = ["""past_key_values"""]
_UpperCamelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def num_attention_heads( self ):
return self.encoder_attention_heads
@property
    def hidden_size( self ):
return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig( OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
    def atol_for_validation( self ):
return 1E-5
@property
    def default_onnx_opset( self ):
return 12
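# Hypothetical sanity check of the defaults above:
#   config = ConditionalDetrConfig()
#   assert config.hidden_size == config.d_model == 256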
| 84 | 0 |
import math
def jump_search(arr: list, x: int) -> int:
    """Search sorted `arr` for `x` in O(sqrt(n)) by jumping in sqrt(n)-sized blocks."""
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F"""Number {x} is at index {res}""")
| 105 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
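# `_LazyModule` replaces this module in `sys.modules`, deferring the heavy imports
# declared in `_import_structure` until an attribute is first accessed.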
| 84 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        } , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    language: str = field(
        default=None , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
    train_language: Optional[str] = field(
        default=None , metadata={'help': 'Train language if it is different from the evaluation language.'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    do_lower_case: Optional[bool] = field(
        default=None , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_xnli' , model_args , data_args )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features['label'].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features['label'].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features['label'].names
    # Labels
    num_labels = len(label_list )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples['premise'] , examples['hypothesis'] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
    # Get the metric function
    metric = evaluate.load('xnli' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    # Prediction
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix='predict' )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics['predict_samples'] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics('predict' , metrics )
        trainer.save_metrics('predict' , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'''{index}\t{item}\n''' )
if __name__ == "__main__":
    main()
| 106 |
def topological_sort(graph ):
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('Cycle exists' )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
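# Expected output for the sample DAG above (Kahn's algorithm, BFS order): [0, 1, 2, 3, 4, 5]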
| 84 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
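    # Note: `strength` in the inputs above controls how strongly the init image is
    # re-noised (near 0 keeps the input, near 1 largely ignores it), so 0.75 allows substantial edits.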
    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        # disabling the memory-pattern optimization keeps onnxruntime's GPU memory use predictable here
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx' )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 107 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neo'''] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_gpt_neo'''] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 108 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput( BaseOutput ):
'''simple docstring'''
    down_block_res_samples : jnp.ndarray
    mid_block_res_sample : jnp.ndarray
class FlaxControlNetConditioningEmbedding( nn.Module ):
'''simple docstring'''
    conditioning_embedding_channels : int
    block_out_channels : Tuple[int] = (16, 32, 96, 256)
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , conditioning ):
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel( nn.Module , FlaxModelMixin , ConfigMixin ):
'''simple docstring'''
    sample_size : int = 32
    in_channels : int = 4
    down_block_types : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention : Union[bool, Tuple[bool]] = False
    block_out_channels : Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block : int = 2
    attention_head_dim : Union[int, Tuple[int]] = 8
    num_attention_heads : Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim : int = 1280
    dropout : float = 0.0
    use_linear_projection : bool = False
    dtype : jnp.dtype = jnp.float32
    flip_sin_to_cos : bool = True
    freq_shift : int = 0
    controlnet_conditioning_channel_order : str = "rgb"
    conditioning_embedding_out_channels : Tuple[int] = (16, 32, 96, 256)
    def init_weights( self , rng ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale = 1.0 , return_dict = True , train = False , ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
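# Rough usage sketch (hypothetical shapes, untested):
#   controlnet = FlaxControlNetModel()
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#   out = controlnet.apply({'params': params}, sample, timesteps, encoder_hidden_states, controlnet_cond)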
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest( unittest.TestCase ):
@slow
    def test_output_embeds_base_model( self ):
        '''simple docstring'''
        model = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
        features = {
            """input_ids""": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] ,dtype=tf.int32 ),  # "My dog is cute"
            """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.int32 ),
        }
        output = model(features )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape ,expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0_681_762, 0.10_894_451, 0.06_772_504],
                    [-0.06_423_668, 0.02_366_615, 0.04_329_344],
                    [-0.06_057_295, 0.09_974_135, -0.00_070_584],
                ]
            ] ,dtype=tf.float32 ,)
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 109 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase = '''true'''
def get_basic_setup(accelerator , num_samples=82 , batch_size=16 ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader(accelerator , use_longest=False ):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    dataset = load_dataset('glue' , 'mrpc' , split='validation' )
    def tokenize_function(examples ):
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
        tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup(dispatch_batches , split_batches ):
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model , dataloader , accelerator ):
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics(accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'''
def test_mrpc(dispatch_batches = False , split_batches = False ):
    metric = evaluate.load('glue' , 'mrpc' )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup['no']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch['labels'] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['labels']
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    accelerator = Accelerator(split_batches=False , dispatch_batches=None )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(accelerator , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**' )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
    accelerator.state._reset_state()
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
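# This module is meant to run under accelerate's multi-process launcher,
# e.g. `accelerate launch <path-to-this-script>`, so the gather logic is exercised across processes.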
| 84 | 0 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path , targets ):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('UTF-8' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' ' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '\n'.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F''': {x}: ''' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings
def extract_warnings(artifact_dir , targets ):
    """Extract warnings from all artifact files in `artifact_dir`."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str(values ):
        return values.split(',' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 110 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
'''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """OwlViTImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
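# Usage sketch (hedged): the checkpoint id below is an assumption; any OwlViT
# checkpoint that ships both an image processor and a CLIP tokenizer should work.
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   # `inputs` then carries `input_ids`, `attention_mask` and `pixel_values`.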
| 84 | 0 |
def cocktail_shaker_sort(unsorted):
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
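# Quick check (a sketch): cocktail_shaker_sort([4, 5, 2, 1, 2]) returns
# [1, 2, 2, 4, 5]; the bidirectional passes stop early once a full sweep makes
# no swap.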
| 658 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets,
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
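# Usage sketch (the model id is taken from the pretrained maps above):
#   tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   tokenizer("hello world")["input_ids"]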
| 84 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
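# Quick check (a sketch): integrating dy/dx = y from x = 0 to 1 with y(0) = 1
# approximates e; with step_size = 0.01 the forward-Euler estimate is
# (1.01)**100 ~ 2.7048.
#   y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(y[-1])  # ~2.7048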
| 375 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
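# These tests are meant to be driven by pytest; a hypothetical invocation
# (the path is an assumption about the repository layout):
#   python -m pytest tests/models/openai/test_modeling_openai.py -k "lm_head" -v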
| 84 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        "raw_values" : Returns a full set of errors in case of multioutput input.\n\n        "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric("mse")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric("mse", "multilist")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1.        ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types()), reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ], )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float')),
                "references": datasets.Sequence(datasets.Value('float')),
            }
        else:
            return {
                "predictions": datasets.Value('float'),
                "references": datasets.Value('float'),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 183 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
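# With the `_LazyModule` indirection above, the modeling submodule (and its
# torch dependency) is only imported when one of the exported names is first
# accessed, not at `import transformers` time.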
| 84 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "inv_freq" in name:
                        weight_type = 'inv_freq'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act='swish')
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = 'rotary'
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
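# Hypothetical invocation sketch (script name and all paths are placeholders):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path ./conformer_rel_pos.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer \
#       --not_finetuned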
| 466 |
import math
def prime_sieve(n):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit=999_966_663_333):
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
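# Sanity check (a quick sketch): prime_sieve(30) yields
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]; `solution()` then sums, for each pair
# of consecutive primes, the "semidivisible" numbers between their squares.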
| 84 | 0 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 541 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), 'r') as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r'import_structure\[\"([^\"]*)\"\]', init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(F'''- {module}''' for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'''{list_of_modules}\n'''
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.')
if __name__ == "__main__":
check_all_inits()
check_submodules()
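# Intended to be run from the repository root (the relative path is implied by
# the `src/transformers` constant above), e.g.:
#   python utils/check_inits.py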
| 84 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect.'
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
                f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
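# Quick usage sketch: with the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the overall downsampling factor is
# 5 * 2**6 = 320:
#   assert SEWDConfig().inputs_to_logits_ratio == 320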
| 23 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')
class Node(Generic[T]):
    def __init__(self, data):
        self.data = data
        self.next = None

    def __str__(self):
        return F'''{self.data}'''


class Stack(Generic[T]):
    def __init__(self):
        self.top = None

    def __iter__(self):
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self):
        return self.top is None

    def push(self, item):
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self):
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self):
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self):
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
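# Example usage (a short sketch):
#   stack = Stack[int]()
#   for value in (1, 2, 3):
#       stack.push(value)
#   assert str(stack) == "3->2->1"
#   assert stack.pop() == 3 and stack.peek() == 2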
| 84 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)
            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
def UpperCamelCase__ ( self ) -> Optional[int]:
try:
AutoConfig.register("""new-model""" ,lowerCamelCase_ )
A = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase_ ):
auto_class.register(lowerCamelCase_ ,lowerCamelCase_ )
auto_class.register(lowerCamelCase_ ,lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
auto_class.register(lowerCamelCase_ ,lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
A = BertModelTester(self ).get_config()
A = NewModelConfig(**tiny_config.to_dict() )
A = auto_class.from_config(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase_ )
A = auto_class.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def UpperCamelCase__ ( self ) -> int:
with self.assertRaisesRegex(
lowerCamelCase_ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
A = TFAutoModel.from_pretrained("""bert-base""" )
def UpperCamelCase__ ( self ) -> Dict:
with self.assertRaisesRegex(
lowerCamelCase_ ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A = TFAutoModel.from_pretrained(lowerCamelCase_ ,revision="""aaaaaa""" )
def UpperCamelCase__ ( self ) -> List[str]:
with self.assertRaisesRegex(
lowerCamelCase_ ,"""hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" ,):
A = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCamelCase__ ( self ) -> List[Any]:
with self.assertRaisesRegex(lowerCamelCase_ ,"""Use `from_pt=True` to load this model""" ):
A = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def UpperCamelCase__ ( self ) -> str:
# Make sure we have cached the model.
A = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
A = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
A = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
| 617 |
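For orientation, the registration test in the block above follows a public pattern: define a config, register it with AutoConfig, then register a model class for it with the TF auto classes. A minimal sketch of that flow, where NewModelConfig and TFNewModel are hypothetical stand-ins rather than library classes, and only the register() calls mirror the real transformers API:

# Sketch of the custom auto-class registration flow exercised above.
# NewModelConfig / TFNewModel are hypothetical stand-ins.
from transformers import AutoConfig, PretrainedConfig, TFAutoModel, TFPreTrainedModel

class NewModelConfig(PretrainedConfig):
    model_type = "new-model"

class TFNewModel(TFPreTrainedModel):
    # a real model would build layers; this stub only demonstrates dispatch
    config_class = NewModelConfig

    def call(self, inputs):
        return inputs

AutoConfig.register("new-model", NewModelConfig)
TFAutoModel.register(NewModelConfig, TFNewModel)

# once registered, the auto-API resolves the new type like a built-in one
model = TFAutoModel.from_config(NewModelConfig())
assert isinstance(model, TFNewModel)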
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
lowercase = LlamaModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = LlamaModel(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
lowercase = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = True
lowercase = True
lowercase = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
lowercase = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids with them
lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
lowercase = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['hidden_states'][0]
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCamelCase : List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : int = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : int = False
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LlamaModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'single_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'multi_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(snake_case )
lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = ids_tensor([1, 10] , config.vocab_size )
lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase = LlamaModel(snake_case )
original_model.to(snake_case )
original_model.eval()
lowercase = original_model(snake_case ).last_hidden_state
lowercase = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase = {'type': scaling_type, 'factor': 10.0}
lowercase = LlamaModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
lowercase = scaled_model(snake_case ).last_hidden_state
lowercase = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowercase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
lowercase = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
lowercase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowercase = model(torch.tensor(snake_case ) )
lowercase = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowercase = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowercase = 'Simply put, the theory of relativity states that '
lowercase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowercase = tokenizer.encode(snake_case , return_tensors='pt' )
lowercase = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=snake_case )
# greedy generation outputs
lowercase = model.generate(snake_case , max_new_tokens=64 , top_p=snake_case , temperature=1 , do_sample=snake_case )
lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case )
self.assertEqual(snake_case , snake_case )
| 84 | 0 |
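The cached-decoding check above compresses a general recipe: run one forward pass over the full sequence and one incremental pass that reuses past_key_values, then compare the overlapping positions. A simplified decoder-only sketch on a tiny randomly initialized Llama; the sizes are arbitrary, and it compares logits rather than the hidden states used in the test:

import torch
from transformers import LlamaConfig, LlamaForCausalLM

config = LlamaConfig(
    vocab_size=128, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64,
)
model = LlamaForCausalLM(config).eval()
input_ids = torch.randint(0, config.vocab_size, (1, 8))
next_ids = torch.randint(0, config.vocab_size, (1, 3))

with torch.no_grad():
    # prefix pass that produces the key/value cache
    past = model(input_ids, use_cache=True).past_key_values
    # full pass over the concatenated sequence, no cache
    full = model(torch.cat([input_ids, next_ids], dim=-1)).logits
    # incremental pass that reuses the cache
    cached = model(next_ids, past_key_values=past).logits

# the last three positions must agree between the two code paths
assert torch.allclose(full[:, -3:], cached, atol=1e-3)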
'''simple docstring'''
def multiplicative_persistence( num ):
    if not isinstance(num , int ):
        raise ValueError("""multiplicative_persistence() only accepts integral values""" )
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence( num ):
    if not isinstance(num , int ):
        raise ValueError("""additive_persistence() only accepts integral values""" )
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 325 |
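A quick worked check of the two functions above, using the names restored there: 39 -> 27 -> 14 -> 4 takes three multiplicative steps, while 39 -> 12 -> 3 takes two additive steps.

assert multiplicative_persistence(39) == 3
assert additive_persistence(39) == 2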
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
'''simple docstring'''
_UpperCamelCase : Dict = """dummy_data"""
_UpperCamelCase : Optional[int] = """datasets"""
_UpperCamelCase : Tuple = False
def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
lowercase = 0
lowercase = dataset_name
lowercase = cache_dir
lowercase = use_local_dummy_data
lowercase = config
# download_callbacks take a single url as input
lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase = str(snake_case )
# to be downloaded
lowercase = None
lowercase = None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._dummy_file is None:
lowercase = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self ):
# return full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case , snake_case ):
return self.create_dummy_data_dict(snake_case , snake_case )
elif isinstance(snake_case , (list, tuple) ):
return self.create_dummy_data_list(snake_case , snake_case )
else:
return self.create_dummy_data_single(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
return path
def SCREAMING_SNAKE_CASE__ ( self ):
return {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case , snake_case ):
for single_url in single_urls:
download_callback(snake_case )
else:
lowercase = single_urls
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case , snake_case ):
lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
else:
lowercase = single_urls
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
lowercase = value
# make sure that values are unique
if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase = [data_url[0]] * len(snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(snake_case )
return dummy_data_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
def _iter_archive_members(snake_case ):
# this preserves the order of the members inside the ZIP archive
lowercase = Path(self.dummy_file ).parent
lowercase = path.relative_to(snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case )
lowercase = Path(snake_case )
lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not isinstance(snake_case , snake_case ):
lowercase = [paths]
for path in paths:
if os.path.isfile(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(snake_case ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(snake_case , snake_case )
| 84 | 0 |
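The key trick in the download manager above is reducing each remote URL to a filesystem-safe local name under the dummy-data root. A small self-contained sketch of that mapping; to_dummy_path is an illustrative helper, not part of the class:

import os
import urllib.parse
from pathlib import Path

def to_dummy_path(root, url):
    # keep only the final path component and percent-encode it so that
    # query strings cannot escape the dummy-data directory
    return os.path.join(root, urllib.parse.quote_plus(Path(url).name))

print(to_dummy_path("dummy_data", "https://example.com/data/train.csv?rev=2"))
# dummy_data/train.csv%3Frev%3D2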
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 57 |
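To make the matching rule above concrete, here is the checkpoint regex applied to a typical config docstring fragment; the check passes only when the link equals the hub URL rebuilt from the checkpoint name:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
docstring = "Instantiate like [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring):
    # each match is a (name, link) pair; the link must be the hub URL for the name
    assert ckpt_link == f"https://huggingface.co/{ckpt_name}"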
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = OpenAIGPTTokenizer
_UpperCamelCase : List[Any] = OpenAIGPTTokenizerFast
_UpperCamelCase : int = True
_UpperCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowercase = dict(zip(snake_case , range(len(snake_case ) ) ) )
lowercase = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(snake_case ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase = 'lower'
lowercase = ['low', 'er</w>']
lowercase = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
lowercase = tokens + ['<unk>']
lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
# Simple input
lowercase = 'This is a simple input'
lowercase = ['This is a simple input 1', 'This is a simple input 2']
lowercase = ('This is a simple input', 'This is a pair')
lowercase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A_ ( __lowerCamelCase ):
'''simple docstring'''
pass
| 84 | 0 |
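The fixture in setUp above builds a throwaway BPE tokenizer from hand-written vocab and merges files. A compact sketch of the same mechanism outside the test harness, with the token inventory trimmed to what the 'lower' example needs:

import json, os, tempfile
from transformers import OpenAIGPTTokenizer

vocab = ["l", "o", "w", "e", "r", "lo", "low", "er</w>", "low</w>", "<unk>"]
merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.json")
    merges_file = os.path.join(tmp, "merges.txt")
    with open(vocab_file, "w") as fp:
        # ids are just positions in the token list, as in the test fixture
        json.dump(dict(zip(vocab, range(len(vocab)))), fp)
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))
    tokenizer = OpenAIGPTTokenizer(vocab_file, merges_file)
    print(tokenizer.tokenize("lower"))  # ['low', 'er</w>']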
"""simple docstring"""
def _lowerCamelCase ( graph ):
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk )
        for node in graph )
def depth_first_search( graph, vertex, visited, rec_stk ):
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 626 |
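A short usage check for the detector above, keeping the snippet's obfuscated entry-point name: the first graph contains the back edge 2 -> 0, the second is a DAG.

cyclic = {0: [1], 1: [2], 2: [0]}
acyclic = {0: [1, 2], 1: [2], 2: []}
assert _lowerCamelCase(cyclic) is True
assert _lowerCamelCase(acyclic) is False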
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker ):
    '''simple docstring'''
    def check_output(self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 84 | 0 |
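A self-contained sketch of the IGNORE_RESULT mechanism the conftest wires up: register the flag, install a permissive checker, and run a doctest whose printed value cannot match literally. The helper names here are illustrative, only the doctest APIs are the real ones.

import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        # accept any output when the example is marked +IGNORE_RESULT
        if IGNORE_RESULT & optionflags:
            return True
        return super().check_output(want, got, optionflags)

def sample():
    """
    >>> import time
    >>> time.time()  # doctest: +IGNORE_RESULT
    0.0
    """

runner = doctest.DocTestRunner(checker=IgnoreResultChecker())
for test in doctest.DocTestFinder().find(sample):
    runner.run(test)
assert runner.failures == 0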