import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    :param ksize: size of the convolutional filter (ksize x ksize)
    :param sigma: standard deviation of the gaussian bell curve
    :param theta: orientation of the normal to the parallel stripes of the Gabor function
    :param lambd: wavelength of the sinusoidal component
    :param gamma: spatial aspect ratio (ellipticity of the support)
    :param psi: phase offset of the sinusoidal function
    """
    # prepare kernel: the kernel size has to be odd so it has a center pixel
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
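
# Illustrative usage (not part of the original script; argument values assumed):
# an even ksize is bumped to the next odd number so the kernel has a center pixel.
# >>> gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape
# (11, 11)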
def heaps(arr: list) -> list:
    """
    Iterative Heap's algorithm: return all permutations of a list,
    each permutation as a tuple.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
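
# Illustrative check (not part of the original script): Heap's algorithm
# emits each of the n! permutations exactly once.
# >>> sorted(heaps([1, 2]))
# [(1, 2), (2, 1)]
# >>> len(heaps([1, 2, 3]))
# 6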
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
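
# Usage sketch (not part of the original script): like the other Accelerate
# examples, this is launched through the `accelerate` CLI after running
# `accelerate config`; the file name `tracking.py` is assumed here.
#
#   accelerate launch tracking.py --with_tracking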
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak a ParlAI checkpoint's weights into our Blenderbot structure.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
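
# Usage sketch (not part of the original script; the script file name and
# checkpoint paths are assumed for illustration):
#
#   python convert_blenderbot_original_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json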
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
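
# Illustrative checks (not part of the original script): two equal resistors
# in parallel halve the resistance, while series resistance is a plain sum.
# >>> resistor_parallel([2, 2])
# 1.0
# >>> resistor_series([2, 3])
# 5.0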
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from specific models,
    # so the tests live here rather than in the tokenizer classes.

    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
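
# Usage sketch (not part of the original file; the test path is assumed from
# the standard transformers layout). The suite needs `tensorflow` and
# `tensorflow-text` installed:
#
#   python -m pytest tests/models/bert/test_tokenization_bert_tf.py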
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union for a single (prediction, ground truth) pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate total intersection and union over all (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate mean Intersection-over-Union (mIoU) and the related accuracy metrics."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
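
# Worked micro-example (not part of the original file; values assumed).
# For a single 2x2 map with 2 labels:
#   pred = [np.array([[0, 0], [1, 1]])], gt = [np.array([[0, 1], [1, 1]])]
# class 0: intersection 1, union 2 + 1 - 1 = 2 -> IoU 0.5
# class 1: intersection 2, union 2 + 3 - 2 = 3 -> IoU 2/3
# so mean_iou = (0.5 + 2/3) / 2 ≈ 0.583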
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
    T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
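
# Usage sketch (not part of the original file; the test path is assumed from
# the standard transformers layout):
#
#   python -m pytest tests/models/bit/test_modeling_bit.py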
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = get_maskformer_config(UpperCAmelCase_ )
# load original state_dict
with open(UpperCAmelCase_ , 'rb' ) as f:
_UpperCamelCase : Dict = pickle.load(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_UpperCamelCase : Dict = create_rename_keys(UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_swin_q_k_v(UpperCAmelCase_ , config.backbone_config )
read_in_decoder_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ )
# update to torch tensors
for key, value in state_dict.items():
_UpperCamelCase : List[Any] = torch.from_numpy(UpperCAmelCase_ )
# load 🤗 model
_UpperCamelCase : str = MaskFormerForInstanceSegmentation(UpperCAmelCase_ )
model.eval()
for name, param in model.named_parameters():
print(UpperCAmelCase_ , param.shape )
_UpperCamelCase , _UpperCamelCase : List[Any] = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCAmelCase_ ) == 0, F'''Unexpected keys: {unexpected_keys}'''
# verify results
_UpperCamelCase : Optional[int] = prepare_img()
if "vistas" in model_name:
_UpperCamelCase : Union[str, Any] = 6_5
elif "cityscapes" in model_name:
_UpperCamelCase : Union[str, Any] = 6_5_5_3_5
else:
_UpperCamelCase : List[Any] = 2_5_5
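    # ignore_index marks void/unlabeled pixels and differs per dataset
    # (65 for Mapillary Vistas, 65535 for Cityscapes, 255 otherwise)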
_UpperCamelCase : Optional[Any] = True if 'ade' in model_name else False
_UpperCamelCase : Any = MaskFormerImageProcessor(ignore_index=UpperCAmelCase_ , reduce_labels=UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = image_processor(UpperCAmelCase_ , return_tensors='pt' )
_UpperCamelCase : Optional[int] = model(**UpperCAmelCase_ )
print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_UpperCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
image_processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
image_processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 648
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_66_02_54])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] , UpperCAmelCase_ : int ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : Tuple = initial_vectors
for _ in range(UpperCAmelCase_ ):
_UpperCamelCase : str = iteration_step(UpperCAmelCase_ )
return vectors
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
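    # Koch step: every segment is replaced by four segments of one third the
    # length, where the middle third is swapped for the two sides of an
    # equilateral "bump" (the 60-degree rotation below)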
_UpperCamelCase : int = []
for i, start_vector in enumerate(vectors[:-1] ):
_UpperCamelCase : Union[str, Any] = vectors[i + 1]
new_vectors.append(UpperCAmelCase_ )
_UpperCamelCase : Tuple = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCamelCase_ ( UpperCAmelCase_ : numpy.ndarray , UpperCAmelCase_ : float ) -> numpy.ndarray:
'''simple docstring'''
_UpperCamelCase : str = numpy.radians(UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = numpy.cos(UpperCAmelCase_ ), numpy.sin(UpperCAmelCase_ )
_UpperCamelCase : Any = numpy.array(((c, -s), (s, c)) )
return numpy.dot(UpperCAmelCase_ , UpperCAmelCase_ )
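# sanity sketch: rotating numpy.array([1, 0]) by 90 degrees yields approximately
# numpy.array([0, 1]); ((c, -s), (s, c)) is the standard counterclockwise
# rotation matrix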
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> None:
'''simple docstring'''
_UpperCamelCase : str = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_UpperCamelCase , _UpperCamelCase : Dict = zip(*UpperCAmelCase_ )
plt.plot(UpperCAmelCase_ , UpperCAmelCase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 648
| 1
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
a__ = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
a__ = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
a__ = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a__ = field(default=2 , metadata={"help": "Batch size for training."} )
a__ = field(default=2 , metadata={"help": "Batch size for evaluation."} )
a__ = field(default=0.1 , metadata={"help": "Value of weight decay."} )
a__ = field(
default=1_0_0_0_0 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    a__ = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
a__ = field(default="cosine" , metadata={"help": "Learning rate."} )
a__ = field(
default=7_5_0 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
a__ = field(
default=1_6 , metadata={"help": "Number of gradient accumulation steps."} )
a__ = field(
default=_lowercase , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
a__ = field(default=5_0_0_0_0 , metadata={"help": "Maximum number of training steps."} )
a__ = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a__ = field(default=1_0_2_4 , metadata={"help": "Sequence lengths used for training."} )
a__ = field(default=1 , metadata={"help": "Training seed."} )
a__ = field(
default=1_0_2_4 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
a__ = field(
default=_lowercase , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
a__ = field(default=_lowercase , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a__ = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a__ = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
a__ = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a__ = field(default=1_0_2_4 , metadata={"help": "Length of sequences to be evaluated."} )
a__ = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a__ = field(default=_lowercase , metadata={"help": "Number of workers used for code evaluation."} )
a__ = field(
default=_lowercase , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
a__ = field(
default=_lowercase , metadata={"help": "Sample from the language model's output distribution."} )
a__ = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
a__ = field(default=2_5_6 , metadata={"help": "Maximum number of newly generated tokens."} )
a__ = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
a__ = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
a__ = field(default=1_0 , metadata={"help": "Number of generations to run in parallel."} )
a__ = field(
default=2_0_0 , metadata={"help": "Number of completions to generate for each sample."} )
a__ = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
a__ = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
a__ = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
a__ = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default=_lowercase , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
a__ = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
a__ = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
a__ = field(
default=1_0_0_0_0_0 , metadata={"help": "Number of files to save per JSON output file."} )
a__ = field(default="content" , metadata={"help": "Column containing text data to process."} )
a__ = field(
default=1_0_0_0 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
a__ = field(
default=1_0_0 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
a__ = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
a__ = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
a__ = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
a__ = field(
default=_lowercase , metadata={"help": "If True, near-duplicate samples are removed."} )
a__ = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
a__ = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
a__ = field(default="content" , metadata={"help": "Column containing text data to process."} )
a__ = field(default=2_0_0_0_0_0 , metadata={"help": "Number of examples to train tokenizer on."} )
a__ = field(
        default=3_2_7_6_8 , metadata={"help": "Vocabulary size of the new tokenizer."} )
a__ = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
a__ = field(default=_lowercase , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
a__ = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
a__ = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
a__ = field(default=_lowercase , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
a__ = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
a__ = field(default=_lowercase , metadata={"help": "Push saved tokenizer to the hub."} )
| 648
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCamelCase : str = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(UpperCAmelCase_ , id=UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
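    # pytest exits with status 5 when no tests are collected; map that to 0 so
    # an empty test selection is not treated as a failure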
if exitstatus == 5:
_UpperCamelCase : List[Any] = 0
# Doctest custom flag to ignore output.
lowerCAmelCase__ = doctest.register_optionflag("""IGNORE_RESULT""")
lowerCAmelCase__ = doctest.OutputChecker
class lowercase ( _lowercase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case , __snake_case):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __snake_case , __snake_case , __snake_case)
lowerCAmelCase__ = CustomOutputChecker
lowerCAmelCase__ = HfDoctestModule
lowerCAmelCase__ = HfDocTestParser
| 648
| 1
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase__ = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = list(s_dict.keys() )
for key in keys:
_UpperCamelCase : Optional[int] = R'.*/layers_(\d+)'
_UpperCamelCase : int = key
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : List[str] = re.sub(R'layers_(\d+)' , R'block/\1/layer' , UpperCAmelCase_ )
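            # e.g. "encoder/layers_0/attention/..." now reads
            # "encoder/block/0/layer/attention/..."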
_UpperCamelCase : Tuple = R'(encoder|decoder)\/'
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = re.match(UpperCAmelCase_ , UpperCAmelCase_ ).groups()
if groups[0] == "encoder":
_UpperCamelCase : Tuple = re.sub(R'/mlp/' , R'/1/mlp/' , UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , UpperCAmelCase_ )
elif groups[0] == "decoder":
_UpperCamelCase : int = re.sub(R'/mlp/' , R'/2/mlp/' , UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , UpperCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_UpperCamelCase : Optional[int] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
print(F'''{key} -> {new_key}''' )
_UpperCamelCase : Union[str, Any] = s_dict.pop(UpperCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCamelCase : Optional[Any] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCamelCase : Any = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_UpperCamelCase : Any = s_dict[key].shape[0]
_UpperCamelCase : Union[str, Any] = s_dict[key]
for idx in range(UpperCAmelCase_ ):
                _UpperCamelCase : Any = expert_weights[idx]
                print(F'''{key} -> {key.replace("expert/" , "experts/expert_" + str(idx) + "/" )}''' )
s_dict.pop(UpperCAmelCase_ )
return s_dict
lowerCAmelCase__ = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
import regex as re
with open(UpperCAmelCase_ , 'r' ) as f:
_UpperCamelCase : str = f.read()
_UpperCamelCase : Optional[int] = re.findall(R'(.*) = ([0-9.]*)' , UpperCAmelCase_ )
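    # gin lines look roughly like "NUM_HEADS = 12"; each numeric setting is
    # mapped onto a SwitchTransformersConfig field via GIN_TO_CONFIG_MAPPING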
_UpperCamelCase : int = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_UpperCamelCase : List[Any] = float(UpperCAmelCase_ ) if '.' in value else int(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = re.findall(R'(.*activations) = \(\'(.*)\',\)' , UpperCAmelCase_ )[0]
_UpperCamelCase : Union[str, Any] = str(activation[1] )
_UpperCamelCase : Any = num_experts
_UpperCamelCase : Optional[Any] = SwitchTransformersConfig(**UpperCAmelCase_ )
return config
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Dict="./" , UpperCAmelCase_ : Optional[int]=8 ) -> Optional[int]:
'''simple docstring'''
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_UpperCamelCase : Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase_ )
if gin_file is not None:
_UpperCamelCase : str = convert_gin_to_config(UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Optional[Any] = SwitchTransformersConfig.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = SwitchTransformersForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : Dict = flax_params['target']
_UpperCamelCase : str = flatten_dict(UpperCAmelCase_ , sep='/' )
_UpperCamelCase : Dict = rename_keys(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = unflatten_dict(UpperCAmelCase_ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase_ , UpperCAmelCase_ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
    help="""Path to the T5X checkpoint to convert.""",
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
lowerCAmelCase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
    args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 648
|
lowerCAmelCase__ = range(2, 2_0 + 1)
lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple:
'''simple docstring'''
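    # the sequence rule is roughly a(i+1) = a(i) + digitsum(a(i)); a_i holds the
    # little-endian digits of the current term, split at position k into a low
    # part c (value of the low k digits) and a high part b, and jumps of many
    # terms are memoized per (digitsum(b), c) -- see `compute` below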
_UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) )
_UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) )
_UpperCamelCase , _UpperCamelCase : Dict = 0, 0
_UpperCamelCase : Optional[int] = n - i
_UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ )
if sub_memo is not None:
_UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ )
if jumps is not None and len(UpperCAmelCase_ ) > 0:
# find and make the largest jump without going over
_UpperCamelCase : str = -1
for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCamelCase : Optional[Any] = _k
break
if max_jump >= 0:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCamelCase : Tuple = diff + c
for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ):
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
if new_c > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Union[str, Any] = []
else:
_UpperCamelCase : List[Any] = {c: []}
_UpperCamelCase : Optional[int] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCamelCase , _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCamelCase , _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
_UpperCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCamelCase : Union[str, Any] = 0
while j < len(UpperCAmelCase_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) )
return (diff, dn)
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(UpperCAmelCase_ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
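    # example (sketch): a_i = [3, 2, 1] stores 123 little-endian; with k = 1,
    # c = 3 (low digit) and b = 12, so ds_c = 3 and ds_b = 1 + 2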
_UpperCamelCase : Any = i
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = 0, 0, 0
for j in range(len(UpperCAmelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_UpperCamelCase : Union[str, Any] = ds_c + ds_b
diff += addend
_UpperCamelCase : Union[str, Any] = 0
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = a_i[j] + addend
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return diff, i - start_i
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ):
_UpperCamelCase : List[str] = digits[j] + addend
if s >= 1_0:
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
_UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient
else:
_UpperCamelCase : Dict = s
_UpperCamelCase : Optional[Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
digits.append(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = [1]
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : int = 0
while True:
_UpperCamelCase , _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ )
dn += terms_jumped
if dn == n - i:
break
_UpperCamelCase : str = 0
for j in range(len(UpperCAmelCase_ ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
| 648
| 1
|
def lowerCamelCase_ ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int ) -> bool:
'''simple docstring'''
_UpperCamelCase : List[Any] = len(UpperCAmelCase_ )
_UpperCamelCase : List[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_UpperCamelCase : str = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
_UpperCamelCase : int = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
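            # transition: if arr[i - 1] exceeds j it cannot be used, so copy the
            # answer without it; otherwise also try including it (sum j - arr[i - 1])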
if arr[i - 1] > j:
_UpperCamelCase : Any = subset[i - 1][j]
if arr[i - 1] <= j:
_UpperCamelCase : Tuple = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "vit_mae"
def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = qkv_bias
_UpperCamelCase : str = decoder_num_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers
_UpperCamelCase : Any = decoder_intermediate_size
_UpperCamelCase : int = mask_ratio
_UpperCamelCase : List[Any] = norm_pix_loss
| 648
| 1
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : List[Any] = tokenizer
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case)
_UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer(__snake_case)
_UpperCamelCase : Dict = self.bert(**__snake_case)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
_UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_UpperCamelCase : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case)
_UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences)
_UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : int = Path(__snake_case) / 'saved.model'
model.save(__snake_case)
_UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case)
_UpperCamelCase : int = loaded_model(__snake_case)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 648
|
import functools
def lowerCamelCase_ ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase_ ) != 3 or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase_ ) == 0:
return 0
if min(UpperCAmelCase_ ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase_ ) >= 3_6_6:
raise ValueError('All days elements should be less than 366' )
_UpperCamelCase : Union[str, Any] = set(UpperCAmelCase_ )
@functools.cache
def dynamic_programming(UpperCAmelCase_ : int ) -> int:
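        # minimum cost to cover every travel day from `index` to year end:
        # skip non-travel days, otherwise take the cheapest of a 1-, 7- or
        # 30-day pass starting today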
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
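# illustrative instance (assumed values, the classic minimum-cost-tickets
# example): days = [1, 4, 6, 7, 8, 20] with costs = [2, 7, 15] yields 11
# (day pass on day 1, 7-day pass covering days 4-8, day pass on day 20)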
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
| 1
|
import itertools
import string
from collections.abc import Generator, Iterable
def lowerCamelCase_ ( UpperCAmelCase_ : Iterable[str] , UpperCAmelCase_ : int ) -> Generator[tuple[str, ...], None, None]:
'''simple docstring'''
_UpperCamelCase : List[str] = iter(UpperCAmelCase_ )
while True:
_UpperCamelCase : Tuple = tuple(itertools.islice(UpperCAmelCase_ , UpperCAmelCase_ ) )
if not chunk:
return
yield chunk
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
_UpperCamelCase : Dict = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
_UpperCamelCase : Any = ''
if len(UpperCAmelCase_ ) < 2:
return dirty
for i in range(len(UpperCAmelCase_ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCAmelCase_ ) & 1:
clean += "X"
return clean
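# e.g. the preparation above turns "Hello World" into "HELXLOWORLDX": letters
# only, uppercased, an X inserted between the doubled L, and an X appended to
# reach even length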
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> list[str]:
'''simple docstring'''
_UpperCamelCase : Tuple = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_UpperCamelCase : Optional[int] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCAmelCase_ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCAmelCase_ )
return table
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
_UpperCamelCase : str = generate_table(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = prepare_input(UpperCAmelCase_ )
_UpperCamelCase : List[Any] = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCAmelCase_ , 2 ):
_UpperCamelCase , _UpperCamelCase : str = divmod(table.index(UpperCAmelCase_ ) , 5 )
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = divmod(table.index(UpperCAmelCase_ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
_UpperCamelCase : Any = generate_table(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCAmelCase_ , 2 ):
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = divmod(table.index(UpperCAmelCase_ ) , 5 )
_UpperCamelCase , _UpperCamelCase : List[str] = divmod(table.index(UpperCAmelCase_ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 648
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) : Optional[int] = config_and_inputs
_UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
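        # the checkpoint is not hosted on the Hub; if MYDIR is set, resolve the
        # model path relative to that local directory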
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
| 648
| 1
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : float | Decimal , UpperCAmelCase_ : float = 1_0**-1_0 ) -> float:
'''simple docstring'''
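    # Newton-Raphson update: x_{n+1} = x_n - f(x_n) / f'(x_n); Decimal keeps
    # extra precision and sympy's diff supplies the symbolic derivative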
_UpperCamelCase : Optional[Any] = a
while True:
_UpperCamelCase : Optional[Any] = Decimal(UpperCAmelCase_ ) - (
Decimal(eval(UpperCAmelCase_ ) ) / Decimal(eval(str(diff(UpperCAmelCase_ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(UpperCAmelCase_ ) ) < precision: # noqa: S307
return float(UpperCAmelCase_ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find root of log(x) - 1 = 0 (the root is e)
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 648
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A__ ( self , __snake_case):
        _UpperCamelCase : Optional[int] = ''.join(__snake_case).replace('▁' , ' ').strip()
return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
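# The fairseq/SentencePiece id alignment above is the subtle part of this
# tokenizer, so here is a minimal standalone sketch of it. `spm_piece_to_id`
# stands in for sp_model.PieceToId and is an assumption for illustration only.
fairseq_tokens_to_ids_sketch = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset_sketch = 1  # every SentencePiece id is shifted by one


def token_to_id_sketch(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids_sketch:
        return fairseq_tokens_to_ids_sketch[token]
    spm_id = spm_piece_to_id(token)
    # SentencePiece returns 0 for unknown pieces; map that to fairseq's <unk>
    return spm_id + fairseq_offset_sketch if spm_id else fairseq_tokens_to_ids_sketch["<unk>"]


assert token_to_id_sketch("<pad>", lambda t: 0) == 1
assert token_to_id_sketch("▁de", lambda t: 8) == 9  # spm id 8 -> fairseq id 9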
| 648
| 1
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , *__snake_case , **__snake_case):
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case)
| 648
|
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
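# A minimal usage sketch (assuming the class above is TvltProcessor, as in
# transformers; the variable names below are illustrative):
#
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#
# The returned dict merges the image and audio feature dicts built above, so
# `batch` exposes the union of both processors' model input names.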
| 648
| 1
|
lowerCAmelCase__ = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = [False] * len(UpperCAmelCase_ )
_UpperCamelCase : str = [s]
_UpperCamelCase : int = True
while queue:
_UpperCamelCase : Any = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[str] = u
return visited[t]
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ) -> str:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = [-1] * (len(UpperCAmelCase_ ))
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Union[str, Any] = [i[:] for i in graph] # Record original cut, copy.
while bfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Any = float('Inf' )
_UpperCamelCase : Tuple = sink
while s != source:
# Find the minimum value in select path
            _UpperCamelCase : Optional[Any] = min(path_flow , graph[parent[s]][s] )
_UpperCamelCase : Tuple = parent[s]
max_flow += path_flow
_UpperCamelCase : Optional[Any] = sink
while v != source:
_UpperCamelCase : Dict = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase : List[str] = parent[v]
for i in range(len(UpperCAmelCase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
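# A standalone sketch of how the parent[] array filled in by the BFS above is
# walked back to recover an augmenting path (the helper name is illustrative):
def path_from_parent(parent, source, sink):
    path = [sink]
    while path[-1] != source:
        path.append(parent[path[-1]])  # follow predecessor links back to source
    return path[::-1]


assert path_from_parent([-1, 0, 1, 2], source=0, sink=3) == [0, 1, 2, 3]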
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
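# A standalone sketch of the defaulting rules applied in __init__ above: the
# attention width falls back to the model width and the FFN width to 4x it.
def rwkv_width_defaults(hidden_size, attention_hidden_size=None, intermediate_size=None):
    attn = attention_hidden_size if attention_hidden_size is not None else hidden_size
    ffn = intermediate_size if intermediate_size is not None else 4 * hidden_size
    return attn, ffn


assert rwkv_width_defaults(4096) == (4096, 16384)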
| 648
| 1
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class lowercase ( _lowercase ):
"""simple docstring"""
def __lt__( self , __snake_case):
return self[-1] < other[-1]
def __eq__( self , __snake_case):
return self[-1] == other[-1]
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> list:
'''simple docstring'''
_UpperCamelCase : list[Stack] = []
# sort into stacks
for element in collection:
_UpperCamelCase : Any = Stack([element] )
_UpperCamelCase : Optional[int] = bisect_left(UpperCAmelCase_ , UpperCAmelCase_ )
if i != len(UpperCAmelCase_ ):
stacks[i].append(UpperCAmelCase_ )
else:
stacks.append(UpperCAmelCase_ )
# use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks) )
return collection
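# A standalone sketch of the same algorithm with plain lists, to make the
# invariant explicit: each stack stays decreasing bottom-to-top, so reversing
# every stack yields sorted runs that heapq.merge can combine.
def patience_sort_sketch(items):
    stacks = []  # stack tops are kept in increasing order
    for x in items:
        i = bisect_left([s[-1] for s in stacks], x)
        if i != len(stacks):
            stacks[i].append(x)
        else:
            stacks.append([x])
    return list(merge(*(reversed(s) for s in stacks)))


assert patience_sort_sketch([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]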
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 648
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
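# A standalone sketch of what the ONNX `inputs` property above evaluates to
# (the function name is illustrative; OrderedDict is already imported above):
def bert_onnx_inputs_sketch(task="default"):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
    )


assert list(bert_onnx_inputs_sketch()) == ["input_ids", "attention_mask", "token_type_ids"]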
| 648
| 1
|
import enum
import shutil
import sys
lowerCAmelCase__ , lowerCAmelCase__ = shutil.get_terminal_size()
lowerCAmelCase__ = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class lowercase ( enum.Enum ):
"""simple docstring"""
a__ = 0
a__ = 1
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple="" ) -> Union[str, Any]:
'''simple docstring'''
sys.stdout.write(str(UpperCAmelCase_ ) + end )
sys.stdout.flush()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple="" ) -> Any:
'''simple docstring'''
forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , UpperCAmelCase_ )
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
forceWrite('\r' )
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def lowerCamelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
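# A small usage sketch reusing the helper names already called inside this
# module (forceWrite / reset_cursor); it redraws a spinner on a single line:
#
#   for frame in "|/-\\" * 5:
#       reset_cursor()       # carriage return back to column 0
#       forceWrite(frame)    # overwrite in place, flushed, no newline
#
# The cursor-move helper emits ANSI escapes such as "\033[2A" (two lines up).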
| 648
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
                _UpperCamelCase : Tuple = int(idx)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
_UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
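# A standalone sketch of the same NLI-based zero-shot idea via the pipeline
# API (equivalent in spirit, not the tool API; downloads the checkpoint):
#
#   from transformers import pipeline
#   clf = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   clf("I love this movie", candidate_labels=["positive", "negative"])
#
# Each label is scored through a hypothesis template of the form
# "This example is {label}", and the highest entailment logit wins, as in the
# post-processing step above.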
| 648
| 1
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_66_02_54])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] , UpperCAmelCase_ : int ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : Tuple = initial_vectors
for _ in range(UpperCAmelCase_ ):
_UpperCamelCase : str = iteration_step(UpperCAmelCase_ )
return vectors
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : int = []
for i, start_vector in enumerate(vectors[:-1] ):
_UpperCamelCase : Union[str, Any] = vectors[i + 1]
        new_vectors.append(start_vector)
_UpperCamelCase : Tuple = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCamelCase_ ( UpperCAmelCase_ : numpy.ndarray , UpperCAmelCase_ : float ) -> numpy.ndarray:
'''simple docstring'''
_UpperCamelCase : str = numpy.radians(UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = numpy.cos(UpperCAmelCase_ ), numpy.sin(UpperCAmelCase_ )
_UpperCamelCase : Any = numpy.array(((c, -s), (s, c)) )
return numpy.dot(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> None:
'''simple docstring'''
_UpperCamelCase : str = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_UpperCamelCase , _UpperCamelCase : Dict = zip(*UpperCAmelCase_ )
plt.plot(UpperCAmelCase_ , UpperCAmelCase_ )
plt.show()
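# Standalone check of the 2D rotation used by the iteration step above:
# rotating the unit x-vector by 90 degrees must give the unit y-vector.
def rotate_sketch(vector, angle_in_degrees):
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    return numpy.dot(numpy.array(((c, -s), (s, c))), vector)


assert numpy.allclose(rotate_sketch(numpy.array([1.0, 0.0]), 90), [0.0, 1.0])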
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 648
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
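# A sketch of how the lazy structure above behaves at import time
# (illustrative; the attribute names are those registered in _import_structure):
#
#   import transformers.models.blip as blip   # cheap: no torch/TF import yet
#   blip.BlipProcessor                         # first access triggers the real
#                                              # `from .processing_blip import ...`
#
# Backend-gated names (torch/TF/vision) only resolve when their optional
# dependency is installed, mirroring the try/except blocks above.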
| 648
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self):
_UpperCamelCase : Dict = XLMRobertaModel.from_pretrained('xlm-roberta-base')
_UpperCamelCase : Optional[int] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]])
# The dog is cute and lives in the garden house
_UpperCamelCase : str = torch.Size((1, 12, 7_68)) # batch_size, sequence_length, embedding_vector_dim
_UpperCamelCase : Optional[int] = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_UpperCamelCase : List[Any] = model(__snake_case)['last_hidden_state'].detach()
self.assertEqual(output.shape , __snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __snake_case , atol=1e-3))
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = XLMRobertaModel.from_pretrained('xlm-roberta-large')
_UpperCamelCase : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]])
# The dog is cute and lives in the garden house
_UpperCamelCase : Dict = torch.Size((1, 12, 10_24)) # batch_size, sequence_length, embedding_vector_dim
_UpperCamelCase : List[str] = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)['last_hidden_state'].detach()
self.assertEqual(output.shape , __snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __snake_case , atol=1e-3))
| 648
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 1
|
import functools
def lowerCamelCase_ ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase_ ) != 3 or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase_ ) == 0:
return 0
if min(UpperCAmelCase_ ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase_ ) >= 3_6_6:
raise ValueError('All days elements should be less than 366' )
_UpperCamelCase : Union[str, Any] = set(UpperCAmelCase_ )
@functools.cache
def dynamic_programming(UpperCAmelCase_ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
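# Worked example for the memoised recursion above, restated standalone
# (classic instance: days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15] -> 11, i.e. a
# 1-day pass on day 1, a 7-day pass covering days 4-8, a 1-day pass on day 20):
def mincost_tickets_sketch(days, costs):
    day_set = set(days)

    @functools.cache
    def dp(index):
        if index > 365:
            return 0
        if index not in day_set:
            return dp(index + 1)
        return min(costs[0] + dp(index + 1), costs[1] + dp(index + 7), costs[2] + dp(index + 30))

    return dp(1)


assert mincost_tickets_sketch([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11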
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 648
| 1
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case=None):
super().__init__(
__snake_case , question_encoder_tokenizer=__snake_case , generator_tokenizer=__snake_case , index=__snake_case , init_retrieval=__snake_case , )
_UpperCamelCase : List[str] = None
def A__ ( self , __snake_case):
logger.info('initializing retrieval')
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized')
# needs to be set manually
_UpperCamelCase : Any = self._infer_socket_ifname()
# avoid clash with the NCCL port
_UpperCamelCase : Optional[Any] = str(distributed_port + 1)
_UpperCamelCase : List[str] = dist.new_group(ranks=__snake_case , backend='gloo')
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main')
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
def A__ ( self):
return dist.get_rank(group=self.process_group) == 0
def A__ ( self , __snake_case , __snake_case , __snake_case=torch.floataa):
_UpperCamelCase : List[Any] = torch.empty(__snake_case , dtype=__snake_case)
dist.scatter(__snake_case , src=0 , scatter_list=__snake_case , group=self.process_group)
return target_tensor
def A__ ( self):
_UpperCamelCase : Tuple = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_UpperCamelCase : int = next((addr for addr in addrs if addr.startswith('e')) , __snake_case)
return ifname
def A__ ( self , __snake_case , __snake_case):
# single GPU training
if not dist.is_initialized():
_UpperCamelCase , _UpperCamelCase : Optional[int] = self._main_retrieve(__snake_case , __snake_case)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__snake_case)
# distributed training
_UpperCamelCase : Dict = dist.get_world_size(group=self.process_group)
# gather logic
_UpperCamelCase : int = None
if self._is_main():
_UpperCamelCase : List[str] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(__snake_case)]
dist.gather(torch.tensor(__snake_case) , dst=0 , gather_list=__snake_case , group=self.process_group)
# scatter logic
_UpperCamelCase : str = question_hidden_states.shape[0]
_UpperCamelCase : Any = []
_UpperCamelCase : Optional[int] = []
if self._is_main():
assert len(__snake_case) == world_size
_UpperCamelCase , _UpperCamelCase : int = self._main_retrieve(torch.cat(__snake_case).numpy() , __snake_case)
_UpperCamelCase , _UpperCamelCase : int = torch.tensor(__snake_case), torch.tensor(__snake_case)
_UpperCamelCase : Tuple = self._chunk_tensor(__snake_case , __snake_case)
_UpperCamelCase : str = self._chunk_tensor(__snake_case , __snake_case)
_UpperCamelCase : Dict = self._scattered(__snake_case , [n_queries, n_docs] , target_type=torch.intaa)
_UpperCamelCase : Optional[Any] = self._scattered(__snake_case , [n_queries, n_docs, question_hidden_states.shape[1]])
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__snake_case)
| 648
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
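# A minimal usage sketch (assuming the class above is MaskedBertConfig, from
# the movement-pruning research example -- an assumption here). The three
# extra fields on top of the BERT config control how scores become masks:
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#
# `pruning_method` selects the masking strategy, while `mask_init` and
# `mask_scale` initialise the learned mask scores.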
| 648
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/'))
_UpperCamelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(__snake_case , 'src/diffusers/schedulers/scheduling_ddpm.py') , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py') , )
def A__ ( self):
_UpperCamelCase : Tuple = 'src/diffusers'
shutil.rmtree(self.diffusers_dir)
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None):
_UpperCamelCase : List[str] = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_UpperCamelCase : Union[str, Any] = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_UpperCamelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19)
_UpperCamelCase : List[Any] = black.format_str(__snake_case , mode=__snake_case)
_UpperCamelCase : Dict = os.path.join(self.diffusers_dir , 'new_code.py')
with open(__snake_case , 'w' , newline='\n') as f:
f.write(__snake_case)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__snake_case)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=__snake_case)
with open(__snake_case , 'r') as f:
self.assertTrue(f.read() , __snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
self.assertEqual(__snake_case , __snake_case)
def A__ ( self):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , __snake_case , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , __snake_case) , )
# Copy consistency with a really long name
_UpperCamelCase : Dict = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , f'''{long_class_name}SchedulerOutput''' , re.sub('Bert' , __snake_case , __snake_case) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , __snake_case , overwrite_result=re.sub('DDPM' , 'Test' , __snake_case) , )
| 648
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
| 648
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCamelCase : str = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(UpperCAmelCase_ , id=UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
if exitstatus == 5:
_UpperCamelCase : List[Any] = 0
# Doctest custom flag to ignore output.
lowerCAmelCase__ = doctest.register_optionflag("""IGNORE_RESULT""")
lowerCAmelCase__ = doctest.OutputChecker
class lowercase ( _lowercase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case , __snake_case):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __snake_case , __snake_case , __snake_case)
lowerCAmelCase__ = CustomOutputChecker
lowerCAmelCase__ = HfDoctestModule
lowerCAmelCase__ = HfDocTestParser
| 648
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
    )
    parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
    unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
    unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
    unet_config = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
    scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
    scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
    scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
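# Hedged usage sketch (not part of the original script; the path is illustrative):
# once converted, the saved pipeline can be reloaded and sampled in a single step.
#
#     from diffusers import ConsistencyModelPipeline
#     pipe = ConsistencyModelPipeline.from_pretrained("path/to/dump")
#     image = pipe(num_inference_steps=1).images[0]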
| 648
| 1
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=30 , __snake_case=2 , __snake_case=3 , __snake_case=True , __snake_case=True , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=10 , __snake_case=0.0_2 , ):
_UpperCamelCase : List[str] = parent
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Tuple = use_labels
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : List[str] = num_hidden_layers
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : Dict = hidden_act
_UpperCamelCase : Optional[int] = hidden_dropout_prob
_UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCamelCase : Tuple = type_sequence_label_size
_UpperCamelCase : Dict = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : List[str] = (image_size // patch_size) ** 2
_UpperCamelCase : Any = num_patches + 1
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
return config, pixel_values
    def create_and_check_model( self , config , pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification( self , config , pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class FlaxViTModelTest( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = model_class(__snake_case)
_UpperCamelCase : Optional[int] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase : Optional[int] = self._prepare_for_class(__snake_case , __snake_case)
_UpperCamelCase : Optional[int] = model_class(__snake_case)
@jax.jit
                def model_jitted(pixel_values , **kwargs):
                    return model(pixel_values=pixel_values , **kwargs)
with self.subTest('JIT Enabled'):
_UpperCamelCase : Optional[Any] = model_jitted(**__snake_case).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
_UpperCamelCase : Optional[int] = model_jitted(**__snake_case).to_tuple()
self.assertEqual(len(__snake_case) , len(__snake_case))
for jitted_output, output in zip(__snake_case , __snake_case):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def A__ ( self):
for model_class_name in self.all_model_classes:
_UpperCamelCase : Any = model_class_name.from_pretrained('google/vit-base-patch16-224')
_UpperCamelCase : List[str] = model(np.ones((1, 3, 2_24, 2_24)))
self.assertIsNotNone(__snake_case)
| 648
|
def heaps( arr : list ) -> list:
    '''Return all permutations of `arr`, generated iteratively with Heap's algorithm (name restored from the call site below).'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res : list = []

    def generate(n : int , arr : list ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
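# Illustrative check (assuming the corrected `heaps` above): Heap's algorithm
# emits each successive permutation via a single swap, so three elements give
# 3! = 6 tuples:
#
#     heaps([1, 2, 3])
#     # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]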
| 648
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = IFInpaintingSuperResolutionPipeline
a__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
a__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
a__ = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self):
return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
_UpperCamelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A__ ( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def A__ ( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def A__ ( self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def A__ ( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def A__ ( self):
self._test_save_load_local()
def A__ ( self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 648
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key( k : str ) -> str:
    '''Translate a ParlAI state-dict key into the Hugging Face Blenderbot naming scheme.'''
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('encoder' ):
        k = k.replace('.attn' , '.self_attn' )
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'final_layer_norm' )
    elif k.startswith('decoder' ):
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'encoder_attn_layer_norm' )
        k = k.replace('norm3' , 'final_layer_norm' )
    return k
def rename_layernorm_keys( sd : dict ) -> None:
    '''In-place rename of layernorm_embedding keys to layer_norm (Blenderbot-3B checkpoints).'''
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('layernorm_embedding' , 'layer_norm' )
        assert new_k not in sd
        sd[new_k] = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : str , config_json_path : str ) -> None:
    '''Load a ParlAI Blenderbot checkpoint, remap its keys and save a Hugging Face model.'''
    model = torch.load(checkpoint_path , map_location='cpu' )
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 648
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput( ModelOutput ):
    """Base model output extended with the projected (transformed) hidden state."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
"""simple docstring"""
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=5_12 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):
"""simple docstring"""
a__ = [r"pooler", r"logit_scale"]
a__ = [r"position_ids", r"predictions.decoder.bias"]
a__ = "roberta"
a__ = RobertaSeriesConfig
    def __init__( self , config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size , config.project_dim)
        self.has_pre_transformation = getattr(config , 'has_pre_transformation' , False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps)
self.post_init()
    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output2 = outputs['hidden_states'][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2 , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
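# Minimal usage sketch (assumes the restored class names above; the config
# values are illustrative, everything else falls back to XLM-R defaults):
#
#     config = RobertaSeriesConfig(project_dim=512, pooler_fn="cls")
#     model = RobertaSeriesModelWithTransformation(config)
#     out = model(input_ids=torch.tensor([[0, 5, 2]]))
#     out.projection_state.shape  # (1, 3, 512)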
| 648
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class ModelToSave( tf.keras.Model ):
"""simple docstring"""
    def __init__( self , tokenizer):
        super().__init__()
        self.tokenizer = tokenizer
        config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
        self.bert = TFAutoModel.from_config(config)

    def call( self , inputs):
        tokenized = self.tokenizer(inputs)
        out = self.bert(**tokenized)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest( unittest.TestCase ):
"""simple docstring"""
    def setUp( self):
super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case)
_UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences)
_UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : int = Path(__snake_case) / 'saved.model'
model.save(__snake_case)
_UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case)
_UpperCamelCase : int = loaded_model(__snake_case)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 648
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar("""T""")
class LRUCache( Generic[T] ):
    """Least-recently-used cache built on a deque (recency order) and a set (membership)."""

    dq_store : deque # Cache store of keys
    key_reference : set # References of the keys in cache
    _MAX_CAPACITY : int = 1_0 # Maximum capacity of cache

    def __init__( self , n):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            self._MAX_CAPACITY = n

    def refer( self , x):
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display( self):
        for k in self.dq_store:
            print(k)

    def __repr__( self):
        return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
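# Illustrative trace (assuming the corrected LRUCache above): re-referring a key
# moves it to the front instead of evicting anything.
#
#     cache = LRUCache(2)
#     cache.refer("x"); cache.refer("y"); cache.refer("x")  # "x" back to front
#     cache.refer("z")   # evicts "y", the least recently used key
#     str(cache)         # "LRUCache(2) => ['z', 'x']"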
| 648
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 1
|
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset( dataset , expected_features ):
    '''Shared assertions for a dataset read back from SQL (name restored from the call sites below).'''
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ) -> Any:
'''simple docstring'''
_UpperCamelCase : List[str] = tmp_path / 'cache'
_UpperCamelCase : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCamelCase : int = SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_sql_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : List[Any] = tmp_path / 'cache'
_UpperCamelCase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_UpperCamelCase : Optional[Any] = features.copy() if features else default_expected_features
_UpperCamelCase : Optional[Any] = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCamelCase : int = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_sql_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
def iter_sql_file( sqlite_path ):
    '''Yield each row of the `dataset` table from the given SQLite file.'''
    with contextlib.closing(sqlitea.connect(sqlite_path ) ) as con:
        cur = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : List[str] = tmp_path / 'cache'
_UpperCamelCase : List[Any] = os.path.join(UpperCAmelCase_ , 'tmp.sql' )
_UpperCamelCase : Tuple = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=UpperCAmelCase_ ).read()
SqlDatasetWriter(UpperCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
_UpperCamelCase : Any = iter_sql_file(UpperCAmelCase_ )
_UpperCamelCase : List[Any] = iter_sql_file(UpperCAmelCase_ )
for rowa, rowa in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCamelCase : List[str] = tmp_path / 'cache'
_UpperCamelCase : Tuple = os.path.join(UpperCAmelCase_ , 'tmp.sql' )
_UpperCamelCase : Optional[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=UpperCAmelCase_ ).read()
SqlDatasetWriter(UpperCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
_UpperCamelCase : str = iter_sql_file(UpperCAmelCase_ )
_UpperCamelCase : List[str] = iter_sql_file(UpperCAmelCase_ )
for rowa, rowa in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : str = tmp_path / 'cache'
_UpperCamelCase : Any = os.path.join(UpperCAmelCase_ , 'tmp.sql' )
_UpperCamelCase : List[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=UpperCAmelCase_ ).read()
with pytest.raises(UpperCAmelCase_ ):
SqlDatasetWriter(UpperCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
| 648
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Tuple = len(__snake_case)
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Union[str, Any] = out_indices
_UpperCamelCase : int = num_groups
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_backbone( self , config , pixel_values , labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
    def setUp( self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
for name, module in model.named_modules():
if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Any = layer_type
_UpperCamelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def prepare_img( ):
    '''Load the standard COCO test image used by the slow integration test (name restored from the call site below).'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class BitBackboneTest( BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
    def setUp( self):
        self.model_tester = BitModelTester(self)
| 648
| 1
|
from __future__ import annotations
from collections.abc import Generator
def sieve( ) -> Generator[int, None, None]:
    '''Incremental sieve of Eratosthenes: lazily yields the primes in order (name restored from the call site below).'''
    factor_map : dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution( limit : float = 1e1_0 ) -> int:
    '''Return the least odd index n for which the remainder 2*p_n*n first exceeds `limit` (Project Euler 123).'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
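# Sanity sketch for the identity behind `solution` (Project Euler 123): for an
# odd index n, ((p_n - 1)**n + (p_n + 1)**n) % p_n**2 == (2 * p_n * n) % p_n**2.
# With the third prime p_3 = 5 and n = 3: (4**3 + 6**3) % 25 == 5 == (2*5*3) % 25.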
| 648
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8_66_02_54])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( initial_vectors : list[numpy.ndarray] , steps : int ) -> list[numpy.ndarray]:
    '''Apply `steps` Koch-snowflake iteration steps to the initial vector list (name restored from the call site below).'''
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step( vectors : list[numpy.ndarray] ) -> list[numpy.ndarray]:
    '''Replace each edge of the polyline by the four edges forming the Koch bump.'''
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate( vector : numpy.ndarray , angle_in_degrees : float ) -> numpy.ndarray:
    '''Rotate a 2D vector counterclockwise by the given angle in degrees.'''
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot( vectors : list[numpy.ndarray] ) -> None:
    '''Plot the polyline described by the vector list.'''
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
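# Quick check of `rotate` (assuming the corrected helpers above): rotating the
# unit x-vector by 90 degrees yields the unit y-vector.
#
#     numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))  # True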
| 648
| 1
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class PerceiverFeatureExtractor( PerceiverImageProcessor ):
"""simple docstring"""
def __init__( self , *__snake_case , **__snake_case):
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case)
| 648
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure( config ):
    '''Register the custom pytest markers used across the test suite.'''
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption( parser ):
    '''Forward shared command-line options to pytest.'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    '''Emit the extended test reports when --make-reports is set.'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session , exitstatus ):
    '''If no tests are collected, pytest exits with code 5; report success instead.'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker( OutputChecker ):
    """Doctest output checker that honours the custom IGNORE_RESULT flag."""
    def check_output( self , want , got , optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
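# Usage sketch for the custom flag (illustrative doctest, not from the original
# file): an expression tagged with +IGNORE_RESULT is executed but its output is
# never compared.
#
#     >>> import datetime
#     >>> datetime.datetime.now()  # doctest: +IGNORE_RESULT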
| 648
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavaveca"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wavaveca"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wavaveca"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
|
ks = range(2, 2_0 + 1)
base = [1_0**k for k in range(ks[-1] + 1)]
memo = {}
def next_term( a_i , k , i , n ):
    '''Jump from term i toward term n of the digit-sum sequence, reusing cached jumps where possible; returns (difference, terms_jumped).'''
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff , dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff , dn , _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c , a_i[j] = divmod(new_c , 1_0 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute( a_i , k , i , n ):
    '''Compute sequential terms until term n, or until a carry overflows past 10^k; returns (difference, terms_jumped).'''
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s , 1_0 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add( digits , k , addend ):
    '''Add `addend` into the little-endian digit array, starting at index k.'''
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 1_0:
            quotient , digits[j] = divmod(s , 1_0 )
            addend = addend // 1_0 + quotient
        else:
            digits[j] = s
            addend = addend // 1_0
        if addend == 0:
            break
    while addend > 0:
        addend , digit = divmod(addend , 1_0 )
        digits.append(digit )
def solution(n: int = 1_0**1_5) -> int:
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 2_0, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 1_0**j
    return a_n
if __name__ == "__main__":
print(f'{solution() = }')
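# Hedged cross-check (not part of the original file; names are illustrative):
# the digit-array bookkeeping above implements the recurrence
# a(n+1) = a(n) + digitsum(a(n)) with a(1) = 1, so a tiny brute-force version
# can validate small inputs against solution().
def naive_solution(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(d) for d in str(term))
    return term
# e.g. naive_solution(6) == 23, matching the sequence 1, 2, 4, 8, 16, 23, ...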
| 648
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    '''simple docstring'''
    sq: int = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    '''simple docstring'''
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 3_5) -> int:
    '''simple docstring'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1):
        for x_den in range(x_num + 1 , order + 1):
            for y_num in range(1 , order + 1):
                for y_den in range(y_num + 1 , order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num , z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num , z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num , den)
    return total.denominator + total.numerator
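# Sanity note: the four branches above search solutions of
# 1/x**n + 1/y**n = 1/z**n for n in (1, 2, -1, -2); the n = +/-2 branches only
# keep candidates whose numerator and denominator are both perfect squares,
# which is exactly what the is_sq guard checks before taking integer roots.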
if __name__ == "__main__":
print(f'{solution() = }')
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
a__ = "vit_mae"
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=5_12 , decoder_num_hidden_layers=8 , decoder_intermediate_size=20_48 , mask_ratio=0.7_5 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
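# Minimal usage sketch (illustrative, commented out because the class name is
# obfuscated in this dump): the defaults above mirror facebook/vit-mae-base,
# and overrides work like any other PretrainedConfig subclass.
#   cfg = lowercase(mask_ratio=0.5)
#   assert cfg.mask_ratio == 0.5 and cfg.patch_size == 16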
| 648
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
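# Sketch of the lazy-import pattern above: at import time only the
# _import_structure mapping is built; _LazyModule replaces this module in
# sys.modules, and the first attribute access (e.g. CanineTokenizer) triggers
# the real import of the corresponding submodule.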
| 648
|
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    '''simple docstring'''
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days) >= 3_6_6:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days)
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 3_6_5:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
    return dynamic_programming(1 )
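# Worked example: for days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15] the
# minimum cost is 11: a 1-day pass on day 1, a 7-day pass covering days 4-10,
# and a final 1-day pass on day 20 (2 + 7 + 2).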
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def primitive_root(p_val: int) -> int:
    '''simple docstring'''
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    '''simple docstring'''
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
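# Key layout sketch: generate_key returns a public key (key_size, g, modular
# inverse of g**d mod p, p) and a private key (key_size, d); these are the
# four and two comma-separated fields that make_key_files below writes to
# {name}_pubkey.txt and {name}_privkey.txt respectively.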
def make_key_files(name: str, key_size: int) -> None:
    '''simple docstring'''
    if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
        print('\nWARNING:' )
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(F'''{name}_pubkey.txt''' , 'w' ) as fo:
        fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(F'''Writing private key to file {name}_privkey.txt...''' )
    with open(F'''{name}_privkey.txt''' , 'w' ) as fo:
        fo.write(F'''{private_key[0]},{private_key[1]}''' )
def main() -> None:
    '''simple docstring'''
    print('Making key files...' )
    make_key_files('elgamal' , 2_0_4_8 )
    print('Key files generation successful' )
if __name__ == "__main__":
main()
| 648
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
    def setUp( self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)
    def test_for_masked_lm( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)
    def test_for_question_answering( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
    def test_inference_no_head( self):
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['MYDIR'] , directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 10_24))
        self.assertEqual(output.shape , expected_shape)
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii , jj , a , b)
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE) , msg=msg)
| 648
| 1
|
def harmonic_series(n_term: str) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term) ):
        series.append(F'''1/{temp + 1}''' if series else '1' )
    return series
if __name__ == "__main__":
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
    print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
    print(harmonic_series(nth_term))
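# Worked example: harmonic_series("4") returns ['1', '1/2', '1/3', '1/4'],
# the first four terms of the series 1 + 1/2 + 1/3 + 1/4.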
| 648
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( PreTrainedTokenizer ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def A__ ( self , token_ids_0 , token_ids_1 = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def A__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def A__ ( self , token_ids_0 , token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def A__ ( self , text):
        return self.sp_model.encode(text , out_type=str)
    def A__ ( self , token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def A__ ( self , index):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def A__ ( self , tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
    def A__ ( self , save_directory , filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
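# Offset sketch: spm reserves ids 0-2 for '<unk>', '<s>', '</s>' while fairseq
# uses 0-3 for '<s>', '<pad>', '</s>', '<unk>', so every "real" spm piece is
# shifted up by fairseq_offset = 1 and '<mask>' takes the final slot
# len(sp_model) + fairseq_offset, matching the alignment table in __init__.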
| 648
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
|
from ...processing_utils import ProcessorMixin
class lowercase ( ProcessorMixin ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
    def __init__( self , image_processor , feature_extractor):
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
@property
    def model_input_names( self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
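# Hedged usage sketch (variable names are illustrative, commented out):
#   processor = lowercase(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# merges the pixel and audio features into one dict whose keys follow
# model_input_names from both sub-processors.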
| 648
| 1
|
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    '''simple docstring'''
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input' )
    current_sum: int = sum(array[:k] )
    max_sum: int = current_sum
    for i in range(len(array) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
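# Worked example: for array = [1, 4, 2, 10, 23, 3, 1, 0, 20] and k = 4 the
# window sums are 17, 39, 38, 37, 27, 24, so max_sum_in_array(array, 4) == 39.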
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
    k = randint(0, 1_1_0)
    print(f'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
    def __init__( self , vocab_size=5_02_77 , context_length=10_24 , hidden_size=40_96 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
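# Usage note: with the defaults above, attention_hidden_size falls back to
# hidden_size (4096) and intermediate_size to 4 * hidden_size (16384) whenever
# they are left as None, mirroring the two fallback expressions in __init__.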
| 648
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 1_6, model_name: str = "bert-base-cased"):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue' , 'mrpc' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
        performance_metric[F'''epoch-{epoch}'''] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
            json.dump(performance_metric , f )
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--performance_lower_bound' , type=float , default=None , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=3 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
| 648
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
a__ = "bert"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase ( OnnxConfig ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
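# ONNX export sketch: for task == "multiple-choice" each of input_ids,
# attention_mask and token_type_ids is exported with dynamic axes
# {0: batch, 1: choice, 2: sequence}; every other task only marks
# {0: batch, 1: sequence} as dynamic.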
| 648
| 1
|
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(1_0)}
def digit_factorial_sum(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError('Parameter number must be int' )
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0' )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
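# Worked example: digit_factorial_sum(145) == 1! + 4! + 5! == 145, the classic
# fixed point of this map; 169 -> 363601 -> 1454 -> 169 is the well-known loop
# of length 3 that the chain-length cache below exploits.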
def solution(chain_length: int = 6_0, number_limit: int = 1_0_0_0_0_0_0) -> int:
    '''simple docstring'''
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('Parameters chain_length and number_limit must be int' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution()}')
| 648
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( PipelineTool ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
    def setup( self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
    def encode( self , text , labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode( self , outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
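# Hedged usage sketch (agent wiring assumed; commented out, names illustrative):
#   tool = lowercase()
#   tool.setup()
#   inputs = tool.encode('This movie was great', ['positive', 'negative'])
# encode builds one NLI premise/hypothesis pair per label, and decode then
# returns the best-scoring label from the list that was passed in.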
| 648
| 1
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len(num: int) -> int:
    '''simple docstring'''
    return len(unique_prime_factors(num ) )
def equality(iterable: list) -> bool:
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)
def run(n: int) -> list:
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
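# Worked example: solution(3) == 644, since the consecutive numbers
# 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43 and 646 = 2 * 17 * 19 each have
# exactly three distinct prime factors.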
if __name__ == "__main__":
print(solution())
| 648
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 1
|
from __future__ import annotations
import os
from typing import Any
import requests
lowerCAmelCase__ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCAmelCase__ = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowerCAmelCase__ = os.environ.get("""USER_TOKEN""", """""")
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> dict[Any, Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = {
'Authorization': F'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 648
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 1
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """T5Config"""
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "mt5"
a__ = MTaConfig
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "mt5"
a__ = MTaConfig
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "mt5"
a__ = MTaConfig
| 648
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 648
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
| 648
| 1
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 648
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
| 648
| 1
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__)
class lowercase ( folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
a__ = None
a__ = None
class lowercase ( folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
a__ = datasets.Audio()
a__ = "audio"
a__ = AudioFolderConfig
a__ = 42 # definition at the bottom of the script
a__ = AudioClassification(audio_column="audio" , label_column="label" )
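# --- note (added) ---
# As a folder-based builder, AudioFolder infers class labels from directory
# names (e.g. data/train/dog/1.wav -> label "dog") and only collects files
# whose extension appears in the whitelist below.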
lowerCAmelCase__ = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
lowerCAmelCase__ = AUDIO_EXTENSIONS
| 648
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
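# --- behaviour sketch (added) ---
# The parser above mirrors distutils-style strtobool:
#   "yes" / "true" / "t" / "y" / "1"  -> True
#   "no"  / "false" / "f" / "n" / "0" -> False
# and anything else raises argparse.ArgumentTypeError.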
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
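# --- mapping note (added; assumed from the usual diffusers conversion layout) ---
# The renamed keys above typically land on a diffusers ResnetBlock2D as:
#   {old_prefix}.in_layers.0  -> norm1          {old_prefix}.out_layers.0 -> norm2
#   {old_prefix}.in_layers.2  -> conv1          {old_prefix}.out_layers.3 -> conv2
#   {old_prefix}.emb_layers.1 -> time_emb_proj
#   {old_prefix}.skip_connection -> conv_shortcut (only when has_skip)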
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 648
| 1
|
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> Tuple:
'''simple docstring'''
assert x is not None
assert y is not None
_UpperCamelCase : List[str] = len(UpperCAmelCase_ )
_UpperCamelCase : Tuple = len(UpperCAmelCase_ )
# declaring the array for storing the dp values
_UpperCamelCase : Tuple = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
_UpperCamelCase : List[Any] = 1 if x[i - 1] == y[j - 1] else 0
_UpperCamelCase : Any = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_UpperCamelCase : int = ''
_UpperCamelCase , _UpperCamelCase : Optional[int] = m, n
while i > 0 and j > 0:
_UpperCamelCase : Any = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_UpperCamelCase : Optional[Any] = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
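# --- recurrence sketch (added) ---
# The DP table above follows the standard LCS recurrence:
#   L[i][j] = L[i-1][j-1] + 1            if x[i-1] == y[j-1]
#   L[i][j] = max(L[i-1][j], L[i][j-1])  otherwise
# and the trailing while-loop walks the table back to recover one subsequence.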
if __name__ == "__main__":
lowerCAmelCase__ = """AGGTAB"""
lowerCAmelCase__ = """GXTXAYB"""
lowerCAmelCase__ = 4
lowerCAmelCase__ = """GTAB"""
lowerCAmelCase__ , lowerCAmelCase__ = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 648
|
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> list:
'''simple docstring'''
if len(UpperCAmelCase_ ) <= 1:
return [tuple(UpperCAmelCase_ )]
_UpperCamelCase : List[Any] = []
def generate(UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
_UpperCamelCase : Optional[int] = [0] * n
res.append(tuple(UpperCAmelCase_ ) )
_UpperCamelCase : List[Any] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[0]
else:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[c[i]]
res.append(tuple(UpperCAmelCase_ ) )
c[i] += 1
_UpperCamelCase : Tuple = 0
else:
_UpperCamelCase : Tuple = 0
i += 1
generate(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return res
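# --- sanity-check sketch (added; `heaps` names the generator defined above) ---
# Heap's algorithm emits each of the n! orderings exactly once, so for a small
# input the output should agree with itertools:
#
#   from itertools import permutations
#   assert sorted(heaps([1, 2, 3])) == sorted(permutations((1, 2, 3)))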
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 648
| 1
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
return x + 2
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : int = 'x = 3'
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : List[Any] = evaluate(__snake_case , {} , state=__snake_case)
assert result == 3
self.assertDictEqual(__snake_case , {'x': 3})
_UpperCamelCase : List[Any] = 'x = y'
_UpperCamelCase : int = {'y': 5}
_UpperCamelCase : Any = evaluate(__snake_case , {} , state=__snake_case)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__snake_case , {'x': 5, 'y': 5})
def A__ ( self):
_UpperCamelCase : str = 'y = add_two(x)'
_UpperCamelCase : str = {'x': 3}
_UpperCamelCase : str = evaluate(__snake_case , {'add_two': add_two} , state=__snake_case)
assert result == 5
self.assertDictEqual(__snake_case , {'x': 3, 'y': 5})
# Won't work without the tool
with CaptureStdout() as out:
_UpperCamelCase : List[str] = evaluate(__snake_case , {} , state=__snake_case)
assert result is None
assert "tried to execute add_two" in out.out
def A__ ( self):
_UpperCamelCase : Any = 'x = 3'
_UpperCamelCase : List[Any] = {}
_UpperCamelCase : Union[str, Any] = evaluate(__snake_case , {} , state=__snake_case)
assert result == 3
self.assertDictEqual(__snake_case , {'x': 3})
def A__ ( self):
_UpperCamelCase : List[str] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
_UpperCamelCase : Tuple = {'x': 3}
_UpperCamelCase : Optional[Any] = evaluate(__snake_case , {'add_two': add_two} , state=__snake_case)
self.assertDictEqual(__snake_case , {'x': 3, 'y': 5})
self.assertDictEqual(__snake_case , {'x': 3, 'test_dict': {'x': 3, 'y': 5}})
def A__ ( self):
_UpperCamelCase : Optional[int] = 'x = 3\ny = 5'
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : List[Any] = evaluate(__snake_case , {} , state=__snake_case)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__snake_case , {'x': 3, 'y': 5})
def A__ ( self):
_UpperCamelCase : List[str] = 'text = f\'This is x: {x}.\''
_UpperCamelCase : Optional[int] = {'x': 3}
_UpperCamelCase : List[str] = evaluate(__snake_case , {} , state=__snake_case)
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__snake_case , {'x': 3, 'text': 'This is x: 3.'})
def A__ ( self):
_UpperCamelCase : Dict = 'if x <= 3:\n y = 2\nelse:\n y = 5'
_UpperCamelCase : str = {'x': 3}
_UpperCamelCase : Tuple = evaluate(__snake_case , {} , state=__snake_case)
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__snake_case , {'x': 3, 'y': 2})
_UpperCamelCase : Optional[int] = {'x': 8}
_UpperCamelCase : Dict = evaluate(__snake_case , {} , state=__snake_case)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__snake_case , {'x': 8, 'y': 5})
def A__ ( self):
_UpperCamelCase : List[Any] = 'test_list = [x, add_two(x)]'
_UpperCamelCase : List[str] = {'x': 3}
_UpperCamelCase : List[Any] = evaluate(__snake_case , {'add_two': add_two} , state=__snake_case)
self.assertListEqual(__snake_case , [3, 5])
self.assertDictEqual(__snake_case , {'x': 3, 'test_list': [3, 5]})
def A__ ( self):
_UpperCamelCase : str = 'y = x'
_UpperCamelCase : List[str] = {'x': 3}
_UpperCamelCase : Dict = evaluate(__snake_case , {} , state=__snake_case)
assert result == 3
self.assertDictEqual(__snake_case , {'x': 3, 'y': 3})
def A__ ( self):
_UpperCamelCase : str = 'test_list = [x, add_two(x)]\ntest_list[1]'
_UpperCamelCase : Optional[int] = {'x': 3}
_UpperCamelCase : str = evaluate(__snake_case , {'add_two': add_two} , state=__snake_case)
assert result == 5
self.assertDictEqual(__snake_case , {'x': 3, 'test_list': [3, 5]})
_UpperCamelCase : Dict = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
_UpperCamelCase : Union[str, Any] = {'x': 3}
_UpperCamelCase : Dict = evaluate(__snake_case , {'add_two': add_two} , state=__snake_case)
assert result == 5
self.assertDictEqual(__snake_case , {'x': 3, 'test_dict': {'x': 3, 'y': 5}})
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'x = 0\nfor i in range(3):\n x = i'
_UpperCamelCase : str = {}
_UpperCamelCase : List[str] = evaluate(__snake_case , {'range': range} , state=__snake_case)
assert result == 2
self.assertDictEqual(__snake_case , {'x': 2, 'i': 2})
| 648
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if k.startswith('encoder' ):
_UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' )
_UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' )
return k
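# --- example (added, hedged): one key through the renamer above ---
#   "encoder.layers.0.attention.q_lin.weight"
#     -> "encoder.layers.0.self_attn.q_proj.weight"
# (PATTERNS rewrites attention->attn and q_lin->q_proj, then the encoder
#  branch upgrades ".attn" to ".self_attn".)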
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ )
_UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_UpperCamelCase : Tuple = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : int = model['model']
_UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = m.model.state_dict().keys()
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 648
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "vit_mae"
def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = qkv_bias
_UpperCamelCase : str = decoder_num_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers
_UpperCamelCase : Any = decoder_intermediate_size
_UpperCamelCase : int = mask_ratio
_UpperCamelCase : List[Any] = norm_pix_loss
| 648
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : List[Any] = tokenizer
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case)
_UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer(__snake_case)
_UpperCamelCase : Dict = self.bert(**__snake_case)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
_UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_UpperCamelCase : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case)
_UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences)
_UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : int = Path(__snake_case) / 'saved.model'
model.save(__snake_case)
_UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case)
_UpperCamelCase : int = loaded_model(__snake_case)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 648
| 1
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = (IPNDMScheduler,)
a__ = (("num_inference_steps", 5_0),)
def A__ ( self , **__snake_case):
_UpperCamelCase : Optional[int] = {'num_train_timesteps': 10_00}
config.update(**__snake_case)
return config
def A__ ( self , __snake_case=0 , **__snake_case):
_UpperCamelCase : Optional[int] = dict(self.forward_default_kwargs)
_UpperCamelCase : Any = kwargs.pop('num_inference_steps' , __snake_case)
_UpperCamelCase : Union[str, Any] = self.dummy_sample
_UpperCamelCase : List[Any] = 0.1 * sample
_UpperCamelCase : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase : Any = self.get_scheduler_config(**__snake_case)
_UpperCamelCase : Optional[int] = scheduler_class(**__snake_case)
scheduler.set_timesteps(__snake_case)
# copy over dummy past residuals
_UpperCamelCase : List[Any] = dummy_past_residuals[:]
if time_step is None:
_UpperCamelCase : Tuple = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case)
_UpperCamelCase : List[Any] = scheduler_class.from_pretrained(__snake_case)
new_scheduler.set_timesteps(__snake_case)
# copy over dummy past residuals
_UpperCamelCase : Any = dummy_past_residuals[:]
_UpperCamelCase : Any = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
_UpperCamelCase : Any = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
_UpperCamelCase : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
_UpperCamelCase : Optional[int] = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def A__ ( self):
pass
def A__ ( self , __snake_case=0 , **__snake_case):
_UpperCamelCase : Optional[int] = dict(self.forward_default_kwargs)
_UpperCamelCase : int = kwargs.pop('num_inference_steps' , __snake_case)
_UpperCamelCase : List[Any] = self.dummy_sample
_UpperCamelCase : Union[str, Any] = 0.1 * sample
_UpperCamelCase : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase : Any = self.get_scheduler_config()
_UpperCamelCase : str = scheduler_class(**__snake_case)
scheduler.set_timesteps(__snake_case)
# copy over dummy past residuals (must be after setting timesteps)
_UpperCamelCase : int = dummy_past_residuals[:]
if time_step is None:
_UpperCamelCase : str = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case)
_UpperCamelCase : Any = scheduler_class.from_pretrained(__snake_case)
# copy over dummy past residuals
new_scheduler.set_timesteps(__snake_case)
# copy over dummy past residual (must be after setting timesteps)
_UpperCamelCase : Tuple = dummy_past_residuals[:]
_UpperCamelCase : Optional[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
_UpperCamelCase : Dict = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
_UpperCamelCase : Dict = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
_UpperCamelCase : Any = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def A__ ( self , **__snake_case):
_UpperCamelCase : List[str] = self.scheduler_classes[0]
_UpperCamelCase : Dict = self.get_scheduler_config(**__snake_case)
_UpperCamelCase : str = scheduler_class(**__snake_case)
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : List[str] = self.dummy_model()
_UpperCamelCase : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(__snake_case)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase : Union[str, Any] = model(__snake_case , __snake_case)
_UpperCamelCase : int = scheduler.step(__snake_case , __snake_case , __snake_case).prev_sample
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase : Dict = model(__snake_case , __snake_case)
_UpperCamelCase : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case).prev_sample
return sample
def A__ ( self):
_UpperCamelCase : Dict = dict(self.forward_default_kwargs)
_UpperCamelCase : Tuple = kwargs.pop('num_inference_steps' , __snake_case)
for scheduler_class in self.scheduler_classes:
_UpperCamelCase : List[Any] = self.get_scheduler_config()
_UpperCamelCase : List[Any] = scheduler_class(**__snake_case)
_UpperCamelCase : Any = self.dummy_sample
_UpperCamelCase : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(__snake_case , 'set_timesteps'):
scheduler.set_timesteps(__snake_case)
elif num_inference_steps is not None and not hasattr(__snake_case , 'set_timesteps'):
_UpperCamelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCamelCase : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
_UpperCamelCase : List[Any] = dummy_past_residuals[:]
_UpperCamelCase : Optional[int] = scheduler.timesteps[5]
_UpperCamelCase : int = scheduler.timesteps[6]
_UpperCamelCase : int = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
_UpperCamelCase : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
_UpperCamelCase : Optional[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
_UpperCamelCase : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def A__ ( self):
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__snake_case , time_step=__snake_case)
def A__ ( self):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=__snake_case , time_step=__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.full_loop()
_UpperCamelCase : List[Any] = torch.mean(torch.abs(__snake_case))
assert abs(result_mean.item() - 2_54_05_29) < 10
| 648
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 1
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
lowerCAmelCase__ = True
from torch.cuda.amp import autocast
lowerCAmelCase__ = logging.getLogger(__name__)
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Any=None ) -> List[str]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCAmelCase_ )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
a__ = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
a__ = field(
default=_lowercase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
a__ = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
a__ = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
a__ = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
a__ = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
a__ = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
a__ = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default=_lowercase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
a__ = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
a__ = field(
default=_lowercase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
a__ = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
a__ = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
a__ = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
a__ = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class lowercase :
"""simple docstring"""
a__ = 42
a__ = True
a__ = None
a__ = None
a__ = None
a__ = None
def __call__( self , __snake_case):
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase : Union[str, Any] = [{'input_values': feature['input_values']} for feature in features]
_UpperCamelCase : Optional[int] = [{'input_ids': feature['labels']} for feature in features]
_UpperCamelCase : int = self.processor.pad(
__snake_case , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
_UpperCamelCase : Optional[int] = self.processor.pad(
labels=__snake_case , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase : Optional[int] = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1) , -1_00)
_UpperCamelCase : Union[str, Any] = labels
return batch
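# A minimal usage sketch (illustrative, mirroring how the collator is constructed
# further down in this script): it is built once with the shared processor and
# then called on a list of feature dicts, e.g.
#   data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
#   batch = data_collator([train_dataset[0], train_dataset[1]])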
class lowercase ( _lowercase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case):
model.train()
_UpperCamelCase : Optional[int] = self._prepare_inputs(__snake_case)
if self.use_amp:
with autocast():
_UpperCamelCase : Optional[int] = self.compute_loss(__snake_case , __snake_case)
else:
_UpperCamelCase : Union[str, Any] = self.compute_loss(__snake_case , __snake_case)
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase : Any = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase : List[str] = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''')
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase : Tuple = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__snake_case).backward()
elif self.use_apex:
with amp.scale_loss(__snake_case , self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__snake_case)
else:
loss.backward()
return loss.detach()
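# Design note: the training_step above mirrors Trainer.training_step but routes
# the backward pass through whichever backend is active (torch.cuda.amp, apex, or
# DeepSpeed) and rescales the loss for multi-GPU CTC reduction and gradient
# accumulation before returning the detached loss.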
def lowerCamelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase : Tuple = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase : List[Any] = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
_UpperCamelCase : List[Any] = F'''[{"".join(data_args.chars_to_ignore )}]'''
def remove_special_characters(UpperCAmelCase_ : Dict ):
_UpperCamelCase : Tuple = re.sub(UpperCAmelCase_ , '' , batch['sentence'] ).lower() + ' '
return batch
_UpperCamelCase : List[Any] = train_dataset.map(UpperCAmelCase_ , remove_columns=['sentence'] )
_UpperCamelCase : List[str] = eval_dataset.map(UpperCAmelCase_ , remove_columns=['sentence'] )
def extract_all_chars(UpperCAmelCase_ : Any ):
_UpperCamelCase : Any = ' '.join(batch['text'] )
_UpperCamelCase : int = list(set(UpperCAmelCase_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase : Dict = train_dataset.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , batch_size=-1 , keep_in_memory=UpperCAmelCase_ , remove_columns=train_dataset.column_names , )
_UpperCamelCase : Optional[Any] = train_dataset.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , batch_size=-1 , keep_in_memory=UpperCAmelCase_ , remove_columns=eval_dataset.column_names , )
_UpperCamelCase : Optional[Any] = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
_UpperCamelCase : Any = {v: k for k, v in enumerate(UpperCAmelCase_ )}
_UpperCamelCase : Union[str, Any] = vocab_dict[' ']
del vocab_dict[" "]
_UpperCamelCase : Union[str, Any] = len(UpperCAmelCase_ )
_UpperCamelCase : Any = len(UpperCAmelCase_ )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase : Tuple = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
_UpperCamelCase : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ )
_UpperCamelCase : Tuple = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
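    # The processor bundles the feature extractor (raw audio -> input_values) and
    # the CTC tokenizer (text -> label ids) so both are reachable through a single object.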
_UpperCamelCase : Optional[int] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase : Dict = min(len(UpperCAmelCase_ ) , data_args.max_train_samples )
_UpperCamelCase : Optional[Any] = train_dataset.select(range(UpperCAmelCase_ ) )
if data_args.max_val_samples is not None:
_UpperCamelCase : Any = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase : Any = torchaudio.transforms.Resample(4_8_0_0_0 , 1_6_0_0_0 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(UpperCAmelCase_ : List[str] ):
_UpperCamelCase , _UpperCamelCase : List[str] = torchaudio.load(batch['path'] )
_UpperCamelCase : Union[str, Any] = resampler(UpperCAmelCase_ ).squeeze().numpy()
_UpperCamelCase : Union[str, Any] = 1_6_0_0_0
_UpperCamelCase : Union[str, Any] = batch['text']
return batch
_UpperCamelCase : int = train_dataset.map(
UpperCAmelCase_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase : Union[str, Any] = eval_dataset.map(
UpperCAmelCase_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(UpperCAmelCase_ : List[str] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
_UpperCamelCase : int = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(UpperCAmelCase_ )
return batch
_UpperCamelCase : Any = train_dataset.map(
UpperCAmelCase_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase : List[Any] = eval_dataset.map(
UpperCAmelCase_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase : Union[str, Any] = datasets.load_metric('wer' )
def compute_metrics(UpperCAmelCase_ : Tuple ):
_UpperCamelCase : Optional[Any] = pred.predictions
_UpperCamelCase : List[Any] = np.argmax(UpperCAmelCase_ , axis=-1 )
_UpperCamelCase : Optional[Any] = processor.tokenizer.pad_token_id
_UpperCamelCase : Any = processor.batch_decode(UpperCAmelCase_ )
# we do not want to group tokens when computing the metrics
_UpperCamelCase : Tuple = processor.batch_decode(pred.label_ids , group_tokens=UpperCAmelCase_ )
_UpperCamelCase : List[Any] = wer_metric.compute(predictions=UpperCAmelCase_ , references=UpperCAmelCase_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase : Union[str, Any] = DataCollatorCTCWithPadding(processor=UpperCAmelCase_ , padding=UpperCAmelCase_ )
# Initialize our Trainer
_UpperCamelCase : Any = CTCTrainer(
model=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase : Dict = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase : Any = model_args.model_name_or_path
else:
_UpperCamelCase : Union[str, Any] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase : str = trainer.train(resume_from_checkpoint=UpperCAmelCase_ )
trainer.save_model()
_UpperCamelCase : str = train_result.metrics
_UpperCamelCase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase_ )
)
_UpperCamelCase : Tuple = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
trainer.log_metrics('train' , UpperCAmelCase_ )
trainer.save_metrics('train' , UpperCAmelCase_ )
trainer.save_state()
# Evaluation
_UpperCamelCase : Optional[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCamelCase : Optional[int] = trainer.evaluate()
_UpperCamelCase : List[str] = data_args.max_val_samples if data_args.max_val_samples is not None else len(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
trainer.log_metrics('eval' , UpperCAmelCase_ )
trainer.save_metrics('eval' , UpperCAmelCase_ )
return results
if __name__ == "__main__":
main()
| 648
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Tuple = len(__snake_case)
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Union[str, Any] = out_indices
_UpperCamelCase : int = num_groups
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = BitModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = self.num_labels
_UpperCamelCase : Dict = BitForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_UpperCamelCase : Any = None
_UpperCamelCase : str = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def A__ ( self):
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = BitModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
for name, module in model.named_modules():
if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Any = layer_type
_UpperCamelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = BitModelTester(self)
| 648
| 1
|
from math import loga
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
if a < 0:
raise ValueError('Input value must be a positive integer' )
    elif not isinstance(a , int ):
        raise TypeError('Input value must be an \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
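# Worked example (illustrative): for input 36 (0b100100), a & -a isolates the
# lowest set bit, 0b100 = 4, and its base-2 logarithm is 2, the returned bit index.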
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_66_02_54])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] , UpperCAmelCase_ : int ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : Tuple = initial_vectors
for _ in range(UpperCAmelCase_ ):
_UpperCamelCase : str = iteration_step(UpperCAmelCase_ )
return vectors
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : int = []
for i, start_vector in enumerate(vectors[:-1] ):
_UpperCamelCase : Union[str, Any] = vectors[i + 1]
new_vectors.append(UpperCAmelCase_ )
_UpperCamelCase : Tuple = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
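# Design note: each segment is replaced by four shorter ones (the middle third is
# folded outwards by a 60 degree rotation), so the vector count roughly quadruples
# per iteration of the snowflake.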
def lowerCamelCase_ ( UpperCAmelCase_ : numpy.ndarray , UpperCAmelCase_ : float ) -> numpy.ndarray:
'''simple docstring'''
_UpperCamelCase : str = numpy.radians(UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = numpy.cos(UpperCAmelCase_ ), numpy.sin(UpperCAmelCase_ )
_UpperCamelCase : Any = numpy.array(((c, -s), (s, c)) )
return numpy.dot(UpperCAmelCase_ , UpperCAmelCase_ )
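# Illustrative check: rotating (1, 0) by 90 degrees yields approximately (0, 1),
# since ((cos t, -sin t), (sin t, cos t)) is a counter-clockwise rotation matrix.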
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> None:
'''simple docstring'''
_UpperCamelCase : str = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_UpperCamelCase , _UpperCamelCase : Dict = zip(*UpperCAmelCase_ )
plt.plot(UpperCAmelCase_ , UpperCAmelCase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 648
| 1
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCAmelCase__ = """src/transformers"""
lowerCAmelCase__ = """docs/source/en"""
lowerCAmelCase__ = """."""
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
with open(UpperCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(UpperCAmelCase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : int = start_index
while not lines[end_index].startswith(UpperCAmelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCAmelCase__ = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
lowerCAmelCase__ = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
lowerCAmelCase__ = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCAmelCase__ = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Tuple = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , UpperCAmelCase_ )
return [m.group(0 ) for m in matches]
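# Example (illustrative): applied to 'TFBertModel' the regex yields
# ['TF', 'Bert', 'Model'], splitting at lower-to-upper and acronym-to-word boundaries.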
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = 2 if text == '✅' or text == '❌' else len(UpperCAmelCase_ )
_UpperCamelCase : Any = (width - text_length) // 2
_UpperCamelCase : Any = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCamelCase_ ( ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Dict = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : List[Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Optional[int] = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : Optional[Any] = collections.defaultdict(UpperCAmelCase_ )
_UpperCamelCase : List[str] = collections.defaultdict(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = collections.defaultdict(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = collections.defaultdict(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = collections.defaultdict(UpperCAmelCase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(UpperCAmelCase_ ):
_UpperCamelCase : int = None
if attr_name.endswith('Tokenizer' ):
_UpperCamelCase : Optional[int] = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
_UpperCamelCase : int = fast_tokenizers
_UpperCamelCase : Optional[Any] = attr_name[:-1_3]
elif _re_tf_models.match(UpperCAmelCase_ ) is not None:
_UpperCamelCase : Tuple = tf_models
_UpperCamelCase : Union[str, Any] = _re_tf_models.match(UpperCAmelCase_ ).groups()[0]
elif _re_flax_models.match(UpperCAmelCase_ ) is not None:
_UpperCamelCase : List[Any] = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(UpperCAmelCase_ ).groups()[0]
elif _re_pt_models.match(UpperCAmelCase_ ) is not None:
_UpperCamelCase : Any = pt_models
_UpperCamelCase : Optional[int] = _re_pt_models.match(UpperCAmelCase_ ).groups()[0]
if lookup_dict is not None:
while len(UpperCAmelCase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Optional[Any] = True
break
# Try again after removing the last word in the name
_UpperCamelCase : Any = ''.join(camel_case_split(UpperCAmelCase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Optional[Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : Any = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : List[Any] = [len(UpperCAmelCase_ ) + 2 for c in columns]
_UpperCamelCase : Optional[int] = max([len(UpperCAmelCase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Optional[Any] = '|' + '|'.join([_center_text(UpperCAmelCase_ , UpperCAmelCase_ ) for c, w in zip(UpperCAmelCase_ , UpperCAmelCase_ )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
_UpperCamelCase : str = {True: '✅', False: '❌'}
for name in model_names:
_UpperCamelCase : Tuple = model_name_to_prefix[name]
_UpperCamelCase : int = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCAmelCase_ , UpperCAmelCase_ ) for l, w in zip(UpperCAmelCase_ , UpperCAmelCase_ )] ) + "|\n"
return table
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int]=False ) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(UpperCAmelCase_ , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
_UpperCamelCase : Union[str, Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCAmelCase_ , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 648
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCamelCase : str = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(UpperCAmelCase_ , id=UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
if exitstatus == 5:
_UpperCamelCase : List[Any] = 0
# Doctest custom flag to ignore output.
lowerCAmelCase__ = doctest.register_optionflag("""IGNORE_RESULT""")
lowerCAmelCase__ = doctest.OutputChecker
class lowercase ( _lowercase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case , __snake_case):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __snake_case , __snake_case , __snake_case)
lowerCAmelCase__ = CustomOutputChecker
lowerCAmelCase__ = HfDoctestModule
lowerCAmelCase__ = HfDocTestParser
| 648
| 1
|
import string
from math import logaa
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> int:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
_UpperCamelCase : Optional[Any] = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> tuple[int, int]:
'''simple docstring'''
_UpperCamelCase : int = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCamelCase : Union[str, Any] = corpus_without_punctuation.split('\n' )
_UpperCamelCase : Dict = term.lower()
return (len([doc for doc in docs if term in doc] ), len(UpperCAmelCase_ ))
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=False ) -> float:
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> float:
'''simple docstring'''
return round(tf * idf , 3 )
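# Illustrative numbers: a term occurring 3 times in a document (tf = 3) and in
# 1 of 100 corpus documents gives idf = round(log10(100 / 1), 3) = 2.0 and a
# tf-idf score of round(3 * 2.0, 3) = 6.0.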
| 648
|
lowerCAmelCase__ = range(2, 2_0 + 1)
lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) )
_UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) )
_UpperCamelCase , _UpperCamelCase : Dict = 0, 0
_UpperCamelCase : Optional[int] = n - i
_UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ )
if sub_memo is not None:
_UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ )
if jumps is not None and len(UpperCAmelCase_ ) > 0:
# find and make the largest jump without going over
_UpperCamelCase : str = -1
for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCamelCase : Optional[Any] = _k
break
if max_jump >= 0:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCamelCase : Tuple = diff + c
for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ):
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
if new_c > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Union[str, Any] = []
else:
_UpperCamelCase : List[Any] = {c: []}
_UpperCamelCase : Optional[int] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCamelCase , _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCamelCase , _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
_UpperCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCamelCase : Union[str, Any] = 0
while j < len(UpperCAmelCase_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) )
return (diff, dn)
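# Design note: jumps are memoised per digit sum of the high digits and per
# low-digit value c, so previously computed stretches of the sequence can be
# skipped wholesale instead of being recomputed term by term.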
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(UpperCAmelCase_ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_UpperCamelCase : Any = i
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = 0, 0, 0
for j in range(len(UpperCAmelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_UpperCamelCase : Union[str, Any] = ds_c + ds_b
diff += addend
_UpperCamelCase : Union[str, Any] = 0
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = a_i[j] + addend
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return diff, i - start_i
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ):
_UpperCamelCase : List[str] = digits[j] + addend
if s >= 1_0:
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
_UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient
else:
_UpperCamelCase : Dict = s
_UpperCamelCase : Optional[Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
digits.append(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = [1]
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : int = 0
while True:
_UpperCamelCase , _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ )
dn += terms_jumped
if dn == n - i:
break
_UpperCamelCase : str = 0
for j in range(len(UpperCAmelCase_ ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
| 648
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
a__ = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a__ = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a__ = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
a__ = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
a__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
a__ = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
a__ = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a__ = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
a__ = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
a__ = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
a__ = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
a__ = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
a__ = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def A__ ( self):
if self.train_file is not None:
_UpperCamelCase : Tuple = self.train_file.split('.')[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_UpperCamelCase : str = self.validation_file.split('.')[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowercase :
"""simple docstring"""
a__ = 42
a__ = True
a__ = None
a__ = None
def __call__( self , __snake_case):
_UpperCamelCase : str = 'label' if 'label' in features[0].keys() else 'labels'
_UpperCamelCase : Tuple = [feature.pop(__snake_case) for feature in features]
_UpperCamelCase : Dict = len(__snake_case)
_UpperCamelCase : Tuple = len(features[0]['input_ids'])
_UpperCamelCase : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(__snake_case)] for feature in features
]
_UpperCamelCase : List[str] = list(chain(*__snake_case))
_UpperCamelCase : Union[str, Any] = self.tokenizer.pad(
__snake_case , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
_UpperCamelCase : Tuple = {k: v.view(__snake_case , __snake_case , -1) for k, v in batch.items()}
# Add back labels
_UpperCamelCase : List[str] = torch.tensor(__snake_case , dtype=torch.intaa)
return batch
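# Design note: each example carries one candidate sequence per answer choice, so
# features are flattened to (batch_size * num_choices) rows for padding and then
# reshaped back to (batch_size, num_choices, seq_len) before labels are re-attached.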
def lowerCamelCase_ ( ) -> str:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , UpperCAmelCase_ , UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCamelCase : Tuple = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase_ )
datasets.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_UpperCamelCase : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_UpperCamelCase : str = {}
if data_args.train_file is not None:
_UpperCamelCase : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
_UpperCamelCase : List[str] = data_args.validation_file
_UpperCamelCase : List[Any] = data_args.train_file.split('.' )[-1]
_UpperCamelCase : Tuple = load_dataset(
UpperCAmelCase_ , data_files=UpperCAmelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_UpperCamelCase : Any = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCamelCase : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCamelCase : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_UpperCamelCase : str = [F'''ending{i}''' for i in range(4 )]
_UpperCamelCase : int = 'sent1'
_UpperCamelCase : Optional[int] = 'sent2'
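    # Hedged sketch of a SWAG "regular" record (illustrative text; field names as in the
    # dataset card): the names above index into a dict roughly like
    # {"sent1": "Members of the procession walk down the street.",
    #  "sent2": "A drum line", "ending0": ..., "ending1": ..., "ending2": ...,
    #  "ending3": ..., "label": 0}
    # so each example pairs one context with four candidate endings.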
if data_args.max_seq_length is None:
_UpperCamelCase : str = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `max_seq_length`'
                ' value of 1024. If you would like to use a longer `max_seq_length` up to `tokenizer.model_max_length`'
                ' you can override this default with `--max_seq_length xxx`.' )
_UpperCamelCase : Optional[int] = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_UpperCamelCase : Any = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCAmelCase_ : Any ):
_UpperCamelCase : Any = [[context] * 4 for context in examples[context_name]]
_UpperCamelCase : Union[str, Any] = examples[question_header_name]
_UpperCamelCase : List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(UpperCAmelCase_ )
]
# Flatten out
_UpperCamelCase : Tuple = list(chain(*UpperCAmelCase_ ) )
_UpperCamelCase : List[Any] = list(chain(*UpperCAmelCase_ ) )
# Tokenize
_UpperCamelCase : Any = tokenizer(
UpperCAmelCase_ , UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCAmelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
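    # Worked example of the flatten/un-flatten round trip above: for a batch of 2 examples,
    # `first_sentences` holds 2 lists of the same context repeated 4 times and
    # `second_sentences` 2 lists of 4 candidate endings; chaining gives 8 parallel
    # (context, ending) pairs, the tokenizer returns 8 sequences, and the final dict
    # comprehension regroups every consecutive 4 back into one multiple-choice example.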
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_UpperCamelCase : Tuple = raw_datasets['train']
if data_args.max_train_samples is not None:
_UpperCamelCase : List[Any] = min(len(UpperCAmelCase_ ) , data_args.max_train_samples )
_UpperCamelCase : Optional[Any] = train_dataset.select(range(UpperCAmelCase_ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_UpperCamelCase : Dict = train_dataset.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_UpperCamelCase : Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_UpperCamelCase : Optional[Any] = min(len(UpperCAmelCase_ ) , data_args.max_eval_samples )
_UpperCamelCase : Any = eval_dataset.select(range(UpperCAmelCase_ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_UpperCamelCase : Dict = eval_dataset.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_UpperCamelCase : List[Any] = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCAmelCase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
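    # Note (added): `pad_to_multiple_of=8` under fp16 aligns padded sequence lengths with
    # tensor-core tile sizes on NVIDIA GPUs, which is why the dynamic collator only sets it
    # when `training_args.fpaa` (fp16) is enabled.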
# Metric
def compute_metrics(UpperCAmelCase_ : Any ):
_UpperCamelCase , _UpperCamelCase : Dict = eval_predictions
_UpperCamelCase : int = np.argmax(UpperCAmelCase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_UpperCamelCase : Optional[int] = Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , )
# Training
if training_args.do_train:
_UpperCamelCase : str = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase : List[Any] = last_checkpoint
_UpperCamelCase : List[str] = trainer.train(resume_from_checkpoint=UpperCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCamelCase : Optional[Any] = train_result.metrics
_UpperCamelCase : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase_ )
)
_UpperCamelCase : List[Any] = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
trainer.log_metrics('train' , UpperCAmelCase_ )
trainer.save_metrics('train' , UpperCAmelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCamelCase : List[str] = trainer.evaluate()
_UpperCamelCase : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
trainer.log_metrics('eval' , UpperCAmelCase_ )
trainer.save_metrics('eval' , UpperCAmelCase_ )
_UpperCamelCase : Dict = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase_ )
else:
trainer.create_model_card(**UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> Union[str, Any]:
    '''Entry point for xla_spawn (TPUs); the process index argument is required but unused.'''
    main()
if __name__ == "__main__":
main()
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "vit_mae"
def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = qkv_bias
_UpperCamelCase : str = decoder_num_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers
_UpperCamelCase : Any = decoder_intermediate_size
_UpperCamelCase : int = mask_ratio
_UpperCamelCase : List[Any] = norm_pix_loss
| 648
| 1
|
from __future__ import annotations
def lowerCamelCase_ ( nums : list ) -> float:
    '''
    Return the arithmetic mean of a non-empty list of numbers.

    >>> lowerCamelCase_([3, 6, 9, 12, 15, 18, 21])
    12.0
    '''
    if not nums:
        raise ValueError('List is empty' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
|
import functools
def lowerCamelCase_ ( days : list[int] , costs : list[int] ) -> int:
    '''
    Minimum total cost of 1-, 7- and 30-day passes (priced costs[0], costs[1],
    costs[2]) needed to cover every travel day in `days`.

    >>> lowerCamelCase_([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    >>> lowerCamelCase_([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15])
    17
    '''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_6_6:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
        if index > 3_6_5:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
    return dynamic_programming(1 )
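# Worked example for the recurrence above: with days=[1, 4, 6, 7, 8, 20] and
# costs=[2, 7, 15], the optimum is 11 - a 7-day pass bought on day 1 covers days 1-7,
# then single-day passes cover days 8 and 20 (7 + 2 + 2); at every travel day the
# memoised search compares costs[0] + f(d+1), costs[1] + f(d+7) and costs[2] + f(d+30).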
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
| 1
|
def lowerCamelCase_ ( input_list : list ) -> list:
    '''Odd-even transposition sort: alternate bubble passes over even- and odd-indexed
    pairs until a full round completes with no swaps.'''
    is_sorted = False
    while is_sorted is False: # Keep looping until a full round makes no swaps
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
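# Illustrative trace (added): on [3, 2, 1] the even pass swaps indices 0/1 giving
# [2, 3, 1], the odd pass swaps indices 1/2 giving [2, 1, 3], the next round produces
# [1, 2, 3], and one final swap-free round lets the loop terminate.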
if __name__ == "__main__":
print("""Enter list to be sorted""")
lowerCAmelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCAmelCase__ = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 648
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def _long_tensor ( UpperCAmelCase_ : list ) -> Optional[Any]:
    '''Build a long tensor on the active test device.'''
    return torch.tensor(
        UpperCAmelCase_ , dtype=torch.long , device=torch_device , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
| 648
| 1
|
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : bool = False ) -> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(UpperCAmelCase_ ), magnitude * sin(UpperCAmelCase_ )]
return [magnitude * cos(radians(UpperCAmelCase_ ) ), magnitude * sin(radians(UpperCAmelCase_ ) )]
def lowerCamelCase_ ( UpperCAmelCase_ : NDArray[floataa] , UpperCAmelCase_ : NDArray[floataa] , UpperCAmelCase_ : float = 1_0**-1 ) -> bool:
'''simple docstring'''
_UpperCamelCase : NDArray[floataa] = cross(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : float = sum(UpperCAmelCase_ )
return abs(UpperCAmelCase_ ) < eps
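# Added note: each entry of the cross product above is, up to sign, the scalar z-moment
# r x F of one force about the origin, so the body is reported in rotational equilibrium
# exactly when those moments sum to ~0 within `eps`; force balance is assumed separately.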
if __name__ == "__main__":
# Test to check if it works
lowerCAmelCase__ = array(
[
polar_force(7_18.4, 1_8_0 - 3_0),
polar_force(8_79.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
lowerCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCAmelCase__ = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
lowerCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCAmelCase__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
lowerCAmelCase__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 648
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
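        # Illustrative conversion (added): for the first "real" piece ',' SentencePiece
        # returns PieceToId(',') == 3 and adding `fairseq_offset` (1) recovers fairseq id 4,
        # matching the alignment table above; ids for '<s>', '<pad>', '</s>' and '<unk>'
        # bypass the offset via `fairseq_tokens_to_ids`.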
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
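        # Sketch of the resulting layouts (XLM-R convention): a single sequence becomes
        # `<s> A </s>` and a pair becomes `<s> A </s></s> B </s>`, matching the two
        # return statements above.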
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def A__ ( self , __snake_case):
        # Join the pieces and convert the SentencePiece underline marker back to a plain space.
        out_string = ''.join(__snake_case).replace('▁' , ' ').strip()
        return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
| 648
| 1
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Tuple = MobileBertConfig.from_json_file(UpperCAmelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_UpperCamelCase : Any = MobileBertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
_UpperCamelCase : Tuple = load_tf_weights_in_mobilebert(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 648
|
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 648
| 1
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowercase ( ctypes.Structure ):
"""simple docstring"""
a__ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor ( ) -> None:
    '''Hide the terminal cursor (Windows console API, or an ANSI escape on POSIX).'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()
def show_cursor ( ) -> None:
    '''Make the terminal cursor visible again.'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()
@contextmanager
def lowerCamelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
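# Usage sketch (added; `hide_cursor_context` is a hypothetical alias for the
# @contextmanager-decorated function above): the cursor is hidden for the duration of
# the body and restored in the `finally` clause even if the body raises, e.g.
#     with hide_cursor_context():
#         draw_progress_bar()   # hypothetical long-running rendering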
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
| 648
| 1
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def strabool ( v ) -> bool:
    '''Parse a command-line string into a boolean (accepts yes/no, true/false, 1/0, ...).'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
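    # Added note on the mapping below: the source checkpoint stores attention q/k/v as one
    # fused 1x1 convolution, so its weight/bias are chunked into three equal parts along
    # dim 0 and the trailing 1x1 spatial dims are squeezed away, yielding plain linear
    # weights for the diffusers attention block.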
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 648
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
| 648
| 1
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar("""KT""")
lowerCAmelCase__ = TypeVar("""VT""")
class lowercase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , __snake_case = "root" , __snake_case = None):
_UpperCamelCase : List[str] = key
_UpperCamelCase : List[Any] = value
_UpperCamelCase : list[Node[KT, VT]] = []
def __repr__( self):
return f'''Node({self.key}: {self.value})'''
@property
def A__ ( self):
return len(self.forward)
class lowercase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , __snake_case = 0.5 , __snake_case = 16):
_UpperCamelCase : Node[KT, VT] = Node[KT, VT]()
_UpperCamelCase : Any = 0
_UpperCamelCase : Optional[int] = p
_UpperCamelCase : int = max_level
def __str__( self):
_UpperCamelCase : Tuple = list(self)
if len(__snake_case) == 0:
return f'''SkipList(level={self.level})'''
_UpperCamelCase : str = max((len(str(__snake_case)) for item in items) , default=4)
_UpperCamelCase : Optional[Any] = max(__snake_case , 4) + 4
_UpperCamelCase : List[Any] = self.head
_UpperCamelCase : List[str] = []
_UpperCamelCase : Any = node.forward.copy()
lines.append(f'''[{node.key}]'''.ljust(__snake_case , '-') + '* ' * len(__snake_case))
lines.append(' ' * label_size + '| ' * len(__snake_case))
while len(node.forward) != 0:
_UpperCamelCase : Optional[int] = node.forward[0]
lines.append(
f'''[{node.key}]'''.ljust(__snake_case , '-')
+ ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
lines.append(' ' * label_size + '| ' * len(__snake_case))
_UpperCamelCase : Any = node.forward
lines.append('None'.ljust(__snake_case) + '* ' * len(__snake_case))
return f'''SkipList(level={self.level})\n''' + "\n".join(__snake_case)
def __iter__( self):
_UpperCamelCase : Dict = self.head
while len(node.forward) != 0:
yield node.forward[0].key
_UpperCamelCase : Any = node.forward[0]
def A__ ( self):
_UpperCamelCase : List[Any] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def A__ ( self , __snake_case):
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Tuple = self.head
for i in reversed(range(self.level)):
# i < node.level - descend a level when the current node has no
#                  forward pointer at level `i`.
# node.forward[i].key < key - advance only past nodes whose key is
#                             strictly smaller, so a node holding the
#                             searched key is never skipped.
while i < node.level and node.forward[i].key < key:
_UpperCamelCase : Union[str, Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__snake_case)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - if the current node has no further
# references, the searched key is not present.
# node.forward[0].key == key - if the key is present, the next node's
# key must equal the searched key.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def A__ ( self , __snake_case):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self._locate_node(__snake_case)
if node is not None:
for i, update_node in enumerate(__snake_case):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_UpperCamelCase : Optional[int] = node.forward[i]
else:
_UpperCamelCase : List[str] = update_node.forward[:i]
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self._locate_node(__snake_case)
if node is not None:
_UpperCamelCase : str = value
else:
_UpperCamelCase : str = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __snake_case):
update_vector.append(self.head)
_UpperCamelCase : Union[str, Any] = level
_UpperCamelCase : Optional[Any] = Node(__snake_case , __snake_case)
for i, update_node in enumerate(update_vector[:level]):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i])
if update_node.level < i + 1:
update_node.forward.append(__snake_case)
else:
_UpperCamelCase : int = new_node
def A__ ( self , __snake_case):
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self._locate_node(__snake_case)
if node is not None:
return node.value
return None
def lowerCamelCase_ ( ) -> str:
'''simple docstring'''
_UpperCamelCase : Tuple = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 1_2 )
skip_list.insert('Key3' , 4_1 )
skip_list.insert('Key4' , -1_9 )
_UpperCamelCase : List[str] = skip_list.head
_UpperCamelCase : Tuple = {}
while node.level != 0:
_UpperCamelCase : Any = node.forward[0]
_UpperCamelCase : Dict = node.value
assert len(UpperCAmelCase_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def lowerCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = SkipList()
skip_list.insert('Key1' , 1_0 )
skip_list.insert('Key1' , 1_2 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 1_0 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 1_0 )
_UpperCamelCase : Tuple = skip_list.head
_UpperCamelCase : Dict = {}
while node.level != 0:
_UpperCamelCase : str = node.forward[0]
_UpperCamelCase : Union[str, Any] = node.value
if len(UpperCAmelCase_ ) != 4:
print(skip_list )  # dump the structure for debugging before the assert fires
assert len(UpperCAmelCase_ ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def lowerCamelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : str = SkipList()
assert skip_list.find('Some key' ) is None
def lowerCamelCase_ ( ) -> str:
'''simple docstring'''
_UpperCamelCase : int = SkipList()
skip_list.insert('Key2' , 2_0 )
assert skip_list.find('Key2' ) == 2_0
skip_list.insert('Some Key' , 1_0 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 1_3 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 1_0
assert skip_list.find('V' ) == 1_3
def lowerCamelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Dict = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Tuple = SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def lowerCamelCase_ ( ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 1_4
assert skip_list.find('Key1' ) == 1_2
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 1_2
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[Any] = SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4_2 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('X' )
def traverse_keys(UpperCAmelCase_ : Any ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(UpperCAmelCase_ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
def is_sorted(UpperCAmelCase_ : Dict ):
return all(next_item >= item for item, next_item in zip(UpperCAmelCase_ , lst[1:] ) )
_UpperCamelCase : str = SkipList()
for i in range(1_0 ):
skip_list.insert(UpperCAmelCase_ , UpperCAmelCase_ )
assert is_sorted(list(UpperCAmelCase_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(UpperCAmelCase_ ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(UpperCAmelCase_ ) )
def lowerCamelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
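# A minimal interactive sketch of the skip list above (doctest-style; the
# probabilistic level assignment makes the internal layout vary between runs,
# but lookups are deterministic):
#
# >>> sl = SkipList()
# >>> sl.insert('a', 1)
# >>> sl.insert('b', 2)
# >>> sl.find('b')
# 2
# >>> sl.delete('a')
# >>> sl.find('a') is None
# True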
| 648
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
_UpperCamelCase : Tuple = int(__snake_case)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
_UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
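# A minimal usage sketch (assuming this tool is exposed as
# `TextClassificationTool`; it wraps the facebook/bart-large-mnli checkpoint
# declared above and returns the most likely of the provided labels):
#
# >>> classifier = TextClassificationTool()
# >>> classifier('This movie was fantastic.', labels=['positive', 'negative'])
# 'positive'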
| 648
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[Any] = SwinConfig(
embed_dim=1_9_2 , depths=(2, 2, 1_8, 2) , num_heads=(6, 1_2, 2_4, 4_8) , window_size=1_2 , out_features=['stage2', 'stage3', 'stage4'] , )
_UpperCamelCase : Optional[int] = DetaConfig(
backbone_config=UpperCAmelCase_ , num_queries=9_0_0 , encoder_ffn_dim=2_0_4_8 , decoder_ffn_dim=2_0_4_8 , num_feature_levels=5 , assign_first_stage=UpperCAmelCase_ , with_box_refine=UpperCAmelCase_ , two_stage=UpperCAmelCase_ , )
# set labels
_UpperCamelCase : Union[str, Any] = 'huggingface/label-files'
if "o365" in model_name:
_UpperCamelCase : Tuple = 3_6_6
_UpperCamelCase : int = 'object365-id2label.json'
else:
_UpperCamelCase : int = 9_1
_UpperCamelCase : Tuple = 'coco-detection-id2label.json'
_UpperCamelCase : int = num_labels
_UpperCamelCase : Dict = json.load(open(cached_download(hf_hub_url(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) ) , 'r' ) )
_UpperCamelCase : Dict = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : Any = idalabel
_UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : str = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = dct.pop(UpperCAmelCase_ )
_UpperCamelCase : List[str] = val
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ) -> str:
'''simple docstring'''
_UpperCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCamelCase : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCamelCase : Tuple = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_UpperCamelCase : Optional[Any] = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[str] = in_proj_weight[:dim, :]
_UpperCamelCase : str = in_proj_bias[: dim]
_UpperCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_UpperCamelCase : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
_UpperCamelCase : Any = in_proj_weight[
-dim :, :
]
_UpperCamelCase : Tuple = in_proj_bias[-dim :]
# fmt: on
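# The loop above slices a fused qkv projection of shape (3*dim, dim) into
# separate query/key/value tensors. A standalone sketch of the same split
# (shapes are illustrative):
#
# >>> import torch
# >>> dim = 4
# >>> in_proj_weight = torch.randn(3 * dim, dim)
# >>> q = in_proj_weight[:dim, :]
# >>> k = in_proj_weight[dim : dim * 2, :]
# >>> v = in_proj_weight[-dim:, :]
# >>> q.shape == k.shape == v.shape == torch.Size([dim, dim])
# True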
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any ) -> int:
'''simple docstring'''
_UpperCamelCase : List[str] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_UpperCamelCase : Union[str, Any] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_UpperCamelCase : Optional[Any] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : Any = in_proj_weight[:hidden_size, :]
_UpperCamelCase : str = in_proj_bias[:hidden_size]
_UpperCamelCase : str = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_UpperCamelCase : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCamelCase : str = in_proj_weight[-hidden_size:, :]
_UpperCamelCase : Optional[int] = in_proj_bias[-hidden_size:]
def lowerCamelCase_ ( ) -> str:
'''simple docstring'''
_UpperCamelCase : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCamelCase : str = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : int = get_deta_config(UpperCAmelCase_ )
# load original state dict
if model_name == "deta-swin-large":
_UpperCamelCase : int = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_UpperCamelCase : str = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
_UpperCamelCase : str = torch.load(UpperCAmelCase_ , map_location='cpu' )['model']
# print every key and shape of the original state dict for inspection
for name, param in state_dict.items():
print(UpperCAmelCase_ , param.shape )
# rename keys
_UpperCamelCase : Union[str, Any] = create_rename_keys(UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_swin_q_k_v(UpperCAmelCase_ , config.backbone_config )
read_in_decoder_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_UpperCamelCase : str = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : List[str] = val
if "input_proj" in key:
_UpperCamelCase : int = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : List[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_UpperCamelCase : Union[str, Any] = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : Any = val
# finally, create HuggingFace model and load state dict
_UpperCamelCase : Any = DetaForObjectDetection(UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
model.eval()
_UpperCamelCase : List[str] = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(UpperCAmelCase_ )
# load image processor
_UpperCamelCase : Any = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_UpperCamelCase : int = prepare_img()
_UpperCamelCase : List[str] = processor(images=UpperCAmelCase_ , return_tensors='pt' )
_UpperCamelCase : List[str] = encoding['pixel_values']
_UpperCamelCase : Optional[Any] = model(pixel_values.to(UpperCAmelCase_ ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_UpperCamelCase : Union[str, Any] = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
_UpperCamelCase : Optional[Any] = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
_UpperCamelCase : Dict = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
_UpperCamelCase : Optional[int] = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCAmelCase_ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCAmelCase_ ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
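# Example invocation of this conversion script (the script filename is
# illustrative; checkpoints are fetched from the hub repos referenced above):
#
# python convert_deta_swin_to_pytorch.py \
#     --model_name deta-swin-large \
#     --pytorch_dump_folder_path ./deta-swin-large \
#     --push_to_hub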
| 648
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
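# With the `_LazyModule` indirection above, submodules are imported only on
# first attribute access, so e.g. the TF modeling code never loads unless a
# TF class is actually requested. A rough sketch of the effect (module path
# assumed):
#
# >>> from transformers.models.blip import BlipProcessor  # cheap, no torch/TF import yet
# >>> from transformers.models.blip import TFBlipModel    # now imports modeling_tf_blip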
| 648
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 648
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 1
|
from collections import namedtuple
lowerCAmelCase__ = namedtuple("""from_to""", """from_ to""")
lowerCAmelCase__ = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.0_01, 1_0_0_0),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.0_04_54, 2_64.1_72),
"""cubicyard""": from_to(0.7_64_55, 1.3_07_95),
"""cubicfoot""": from_to(0.0_28, 35.31_47),
"""cup""": from_to(0.0_00_23_65_88, 42_26.75),
}
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> float:
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ ', '.join(UpperCAmelCase_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ', '.join(UpperCAmelCase_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
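# A minimal usage sketch of the converter above (the function name follows
# this file's obfuscated convention; conversion goes source unit -> cubic
# metres -> target unit):
#
# >>> lowerCamelCase_(5, 'cubicmeter', 'litre')
# 5000
# >>> lowerCamelCase_(1, 'kilolitre', 'gallon')
# 264.172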
| 648
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 648
| 1
|
import numpy as np
from PIL import Image
def lowerCamelCase_ ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase : Any = np.array(UpperCAmelCase_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
_UpperCamelCase : List[str] = 0
_UpperCamelCase : str = 0
_UpperCamelCase : int = 0
_UpperCamelCase : Tuple = 0
# compute the shape of the output matrix
_UpperCamelCase : Optional[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
_UpperCamelCase : List[Any] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
_UpperCamelCase : Dict = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = 0
return updated_arr
def lowerCamelCase_ ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase : Tuple = np.array(UpperCAmelCase_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
_UpperCamelCase : Any = 0
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Optional[Any] = 0
# compute the shape of the output matrix
_UpperCamelCase : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
_UpperCamelCase : List[str] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
_UpperCamelCase : Any = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Tuple = 0
return updated_arr
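# A small numeric sketch of the two pooling routines above (max pooling is
# the first definition, average pooling the second; both names are obfuscated
# to `lowerCamelCase_` here, so `maxpooling`/`avgpooling` below stand for the
# de-obfuscated definitions). On a 4x4 input with size=2 and stride=2:
#
# >>> arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
# >>> maxpooling(arr, size=2, stride=2)
# array([[ 6.,  8.],
#        [14., 16.]])
# >>> avgpooling(arr, size=2, stride=2)   # averages are truncated via int()
# array([[ 3.,  5.],
#        [11., 13.]])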
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
lowerCAmelCase__ = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 648
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
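# A minimal instantiation sketch (assuming the class is exposed as
# `MaskedBertConfig`; the pruning-specific fields are the ones set above):
#
# >>> config = MaskedBertConfig(pruning_method='topK', mask_init='constant', mask_scale=0.0)
# >>> config.pruning_method
# 'topK'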
| 648
| 1
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.0_2 , use_labels=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , token_labels , **kwargs , ):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1)
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3))
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels , *args , ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common( self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp( self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_bert( self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels)
    def test_model_as_decoder( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask( self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_causal_lm( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self):
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_no_head( self):
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        input_ids = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 10_24])
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_no_head( self):
        model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
        input_ids = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 5_03_58])
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
| 648
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests( unittest.TestCase ):
"""simple docstring"""
    def get_model_optimizer( self , resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3)
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
        return model, optimizer
@slow
    def test_training_step_equal( self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0 , 10_00 , (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # the two runs should produce identical noised inputs and predictions
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5))
| 648
| 1
|
ks = range(2, 2_0 + 1)
base = [1_0**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i , k , i , n):
    '''simple docstring'''
    # ds_b - digitsum(b); c - the low k digits interpreted as a number
    ds_b = sum(a_i[j] for j in range(k , len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i) , k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1 , -1 , -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i))):
                    new_c, a_i[j] = divmod(new_c , 1_0)
                if new_c > 0:
                    add(a_i , k , new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k))
    return (diff, dn)
def compute(a_i , k , i , n):
    '''simple docstring'''
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 1_0)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend)
    return diff, i - start_i
def add(digits , position , addend):
    '''simple docstring'''
    for j in range(position , len(digits)):
        s = digits[j] + addend
        if s >= 1_0:
            quotient, digits[j] = divmod(s , 1_0)
            addend = addend // 1_0 + quotient
        else:
            digits[j] = s
            addend = addend // 1_0
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 1_0)
        digits.append(digit)
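# Worked example for the ripple-carry helper above (digits are stored
# least-significant first): add([9, 9], 0, 3) mutates the list to [2, 0, 1],
# i.e. 99 + 3 = 102.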
def solution(n: int = 1_0**1_5) -> int:
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 2_0 , i + dn , n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 1_0**j
    return a_n
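# Illustrative cross-check (a sketch, not part of the original solution): a
# direct simulation of a(i + 1) = a(i) + digitsum(a(i)), starting from a(1) = 1,
# which can be used to verify the jump-based solver above for small n
# (the indexing is assumed to match solution(n)).
def _naive_a_n(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a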
if __name__ == "__main__":
print(f'{solution() = }')
| 648
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def strabool(v):
    '''simple docstring'''
    if isinstance(v , bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False):
    '''simple docstring'''
    new_checkpoint[f'''{new_prefix}.norm1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[f'''{new_prefix}.conv1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[f'''{new_prefix}.norm2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[f'''{new_prefix}.conv2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[f'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[f'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None):
    '''simple docstring'''
    weight_q, weight_k, weight_v = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0)
    bias_q, bias_k, bias_v = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0)
    new_checkpoint[f'''{new_prefix}.group_norm.weight'''] = checkpoint[f'''{old_prefix}.norm.weight''']
    new_checkpoint[f'''{new_prefix}.group_norm.bias'''] = checkpoint[f'''{old_prefix}.norm.bias''']
    new_checkpoint[f'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f'''{new_prefix}.to_out.0.bias'''] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1).squeeze(-1)
    return new_checkpoint
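# Illustrative shape check (a sketch; names are hypothetical, not part of the
# original script): the fused qkv projection above is stored as a 1x1 conv of
# shape (3*C, C, 1, 1); chunking on dim 0 and squeezing the trailing singleton
# dims yields three (C, C) linear weights, which is what convert_attention
# relies on.
def _demo_qkv_split(channels: int = 8) -> None:
    qkv = torch.randn(3 * channels, channels, 1, 1)
    weight_q, weight_k, weight_v = qkv.chunk(3, dim=0)
    assert weight_q.squeeze(-1).squeeze(-1).shape == (channels, channels)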
def con_pt_to_diffuser(checkpoint_path: str , unet_config):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location='cpu')
    new_checkpoint = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
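    # Example invocation (script name and paths are hypothetical):
    #   python convert_consistency_to_diffusers.py \
    #       --unet_path cd_imagenet64_l2.pt --dump_path ./converted --class_cond True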
| 648
| 1
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
class VisionEncoderDecoderConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "vision-encoder-decoder"
    is_composition = True
    def __init__( self , **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f'''A configuration of type {self.model_type} cannot be instantiated because '''
                f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''')
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config)
        self.is_composition = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs):
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs)
    def to_dict( self):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
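# Illustrative composition (a sketch, not part of the original file):
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# where encoder_config / decoder_config are any two compatible PretrainedConfig instances.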
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs( self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation( self):
        return 1e-4
    @property
    def outputs( self):
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}})
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self):
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['input_ids'] = dummy_input.pop('input_ids')
        common_inputs['attention_mask'] = dummy_input.pop('attention_mask')
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self):
        pass
    def get_encoder_config( self , encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)
    def get_decoder_config( self , encoder_config , decoder_config , feature = "default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature)
| 648
|
def heaps(arr: list) -> list:
    '''
    Iterative Heap's algorithm: returns all permutations of ``arr`` as tuples.
    '''
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(n: int , arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr) , arr)
    return res
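# Example: heaps([1, 2, 3]) yields all 3! = 6 orderings in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]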
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 648
| 1
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def lowerCamelCase_ ( UpperCAmelCase_ : str = "dhaka" , UpperCAmelCase_ : int = 5 ) -> int:
'''simple docstring'''
_UpperCamelCase : str = min(UpperCAmelCase_ , 5_0 ) # Prevent abuse!
_UpperCamelCase : int = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
_UpperCamelCase : str = requests.get('https://www.google.com/search' , params=UpperCAmelCase_ , headers=UpperCAmelCase_ )
_UpperCamelCase : List[Any] = BeautifulSoup(html.text , 'html.parser' )
_UpperCamelCase : int = ''.join(
re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
_UpperCamelCase : Optional[Any] = json.dumps(UpperCAmelCase_ )
_UpperCamelCase : List[str] = json.loads(UpperCAmelCase_ )
_UpperCamelCase : Dict = re.findall(
R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , UpperCAmelCase_ , )
if not matched_google_image_data:
return 0
_UpperCamelCase : Union[str, Any] = re.sub(
R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(UpperCAmelCase_ ) , )
_UpperCamelCase : int = re.findall(
R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , UpperCAmelCase_ , )
for index, fixed_full_res_image in enumerate(UpperCAmelCase_ ):
if index >= max_images:
return index
_UpperCamelCase : Optional[Any] = bytes(UpperCAmelCase_ , 'ascii' ).decode(
'unicode-escape' )
_UpperCamelCase : Union[str, Any] = bytes(UpperCAmelCase_ , 'ascii' ).decode(
'unicode-escape' )
_UpperCamelCase : List[str] = urllib.request.build_opener()
_UpperCamelCase : Tuple = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = F'''query_{query.replace(" " , "_" )}'''
if not os.path.exists(UpperCAmelCase_ ):
os.makedirs(UpperCAmelCase_ )
urllib.request.urlretrieve( # noqa: S310
UpperCAmelCase_ , F'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'{image_count} images were downloaded to disk.')
except IndexError:
print("""Please provide a search term.""")
raise
| 648
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key(k):
    '''simple docstring'''
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn' , '.self_attn')
        k = k.replace('norm1' , 'self_attn_layer_norm')
        k = k.replace('norm2' , 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1' , 'self_attn_layer_norm')
        k = k.replace('norm2' , 'encoder_attn_layer_norm')
        k = k.replace('norm3' , 'final_layer_norm')
    return k
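# Example mapping produced by the renamer above:
#   "encoder.layers.0.attention.q_lin.weight" -> "encoder.layers.0.self_attn.q_proj.weight"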
def rename_layernorm_keys(sd):
    '''simple docstring'''
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding' , 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path):
    '''simple docstring'''
    model = torch.load(checkpoint_path , map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping , strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 648
| 1
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase__ = None
try:
import msvcrt
except ImportError:
lowerCAmelCase__ = None
try:
import fcntl
except ImportError:
lowerCAmelCase__ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    """Timeout""",
    """BaseFileLock""",
    """WindowsFileLock""",
    """UnixFileLock""",
    """SoftFileLock""",
    """FileLock""",
]
__version__ = """3.0.12"""
_logger = None
def logger() -> logging.Logger:
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout( TimeoutError ):
    """simple docstring"""
    def __init__( self , lock_file):
        self.lock_file = lock_file
        return None
    def __str__( self):
        temp = f'''The file lock '{self.lock_file}' could not be acquired.'''
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__( self , lock):
        self.lock = lock
        return None
    def __enter__( self):
        return self.lock
    def __exit__( self , exc_type , exc_value , traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 2_55
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self):
        return self._lock_file
    @property
    def timeout( self):
        return self._timeout
    @timeout.setter
    def timeout( self , value):
        self._timeout = float(value)
        return None
    def _acquire( self):
        raise NotImplementedError()
    def _release( self):
        raise NotImplementedError()
    @property
    def is_locked( self):
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.0_5):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''')
                        self._acquire()
                if self.is_locked:
                    logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''')
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release( self , force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'''Lock {lock_id} released on {lock_filename}''')
        return None
    def __enter__( self):
        self.acquire()
        return self
    def __exit__( self , exc_type , exc_value , traceback):
        self.release()
        return None
    def __del__( self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long( self , path , max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname , new_filename)
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)
    def _acquire( self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release( self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock( BaseFileLock ):
    """simple docstring"""
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length)
    def _acquire( self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode)
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release( self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock( BaseFileLock ):
    """simple docstring"""
    def _acquire( self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("""only soft file lock is available""")
| 648
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
    class ModelToSave( tf.keras.Model ):
        """simple docstring"""
        def __init__( self , tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call( self , inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest( unittest.TestCase ):
"""simple docstring"""
    def setUp( self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1]))
    def test_output_equivalence( self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors='tf' , padding='longest')
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64) == tf_outputs[key]))
@slow
    def test_different_pairing_styles( self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64) == separated_outputs[key]))
@slow
    def test_graph_mode( self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_export_for_inference( self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 648
| 1
|
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path , config_name , flax_dump_folder_path):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_UpperCamelCase : Optional[int] = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_UpperCamelCase : Any = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCamelCase : Any = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
_UpperCamelCase : str = F'''layers_{str(UpperCAmelCase_ )}'''
# Self-Attention
_UpperCamelCase : Tuple = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
_UpperCamelCase : str = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
_UpperCamelCase : int = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
_UpperCamelCase : List[Any] = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCamelCase : Optional[Any] = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
_UpperCamelCase : Union[str, Any] = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
_UpperCamelCase : Dict = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCamelCase : str = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCamelCase : Dict = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
_UpperCamelCase : List[str] = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCamelCase : Optional[Any] = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCamelCase : int = flax_model.params['encoder']['block'][str(UpperCAmelCase_ )]['layer']
_UpperCamelCase : Any = tax_attention_key
_UpperCamelCase : Optional[Any] = tax_attention_out
_UpperCamelCase : Optional[Any] = tax_attention_query
_UpperCamelCase : Optional[int] = tax_attention_value
_UpperCamelCase : Optional[Any] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCamelCase : int = tax_global_layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[int] = tax_mlp_wi_a
_UpperCamelCase : Tuple = tax_mlp_wi_a
else:
_UpperCamelCase : List[Any] = tax_mlp_wi
_UpperCamelCase : List[str] = tax_mlp_wo
_UpperCamelCase : Optional[Any] = tax_mlp_layer_norm
_UpperCamelCase : Any = flax_model_encoder_layer_block
# Only for layer 0:
_UpperCamelCase : Tuple = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
_UpperCamelCase : Dict = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCamelCase : Dict = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
_UpperCamelCase : Any = tax_encoder_global_rel_embedding
# Assigning
_UpperCamelCase : Optional[int] = tax_model['target']['encoder']['encoder_norm']['scale']
_UpperCamelCase : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_UpperCamelCase : str = F'''layers_{str(UpperCAmelCase_ )}'''
# Self-Attention
_UpperCamelCase : Union[str, Any] = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
_UpperCamelCase : Dict = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
_UpperCamelCase : Optional[int] = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
_UpperCamelCase : Optional[Any] = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
_UpperCamelCase : List[str] = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
_UpperCamelCase : Any = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
_UpperCamelCase : Optional[int] = tax_enc_dec_attention_module['key']['kernel']
_UpperCamelCase : Tuple = tax_enc_dec_attention_module['out']['kernel']
_UpperCamelCase : str = tax_enc_dec_attention_module['query']['kernel']
_UpperCamelCase : List[str] = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
_UpperCamelCase : Optional[int] = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
_UpperCamelCase : str = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCamelCase : int = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCamelCase : Tuple = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
_UpperCamelCase : Optional[int] = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCamelCase : List[Any] = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_UpperCamelCase : List[str] = flax_model.params['decoder']['block'][str(UpperCAmelCase_ )]['layer']
_UpperCamelCase : List[Any] = tax_attention_key
_UpperCamelCase : Optional[int] = tax_attention_out
_UpperCamelCase : Dict = tax_attention_query
_UpperCamelCase : Dict = tax_attention_value
_UpperCamelCase : Dict = tax_pre_attention_layer_norm
_UpperCamelCase : Optional[Any] = tax_enc_dec_attention_key
_UpperCamelCase : List[Any] = tax_enc_dec_attention_out
_UpperCamelCase : int = tax_enc_dec_attention_query
_UpperCamelCase : int = tax_enc_dec_attention_value
_UpperCamelCase : Any = tax_cross_layer_norm
if split_mlp_wi:
_UpperCamelCase : int = tax_mlp_wi_a
_UpperCamelCase : int = tax_mlp_wi_a
else:
_UpperCamelCase : Any = tax_mlp_wi
_UpperCamelCase : Tuple = tax_mlp_wo
_UpperCamelCase : List[Any] = txa_mlp_layer_norm
_UpperCamelCase : str = flax_model_decoder_layer_block
# Decoder Normalization
_UpperCamelCase : Optional[Any] = tax_model['target']['decoder']['decoder_norm']['scale']
_UpperCamelCase : Dict = txa_decoder_norm
# Only for layer 0:
_UpperCamelCase : Optional[int] = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
_UpperCamelCase : str = tax_decoder_rel_embedding
# Token Embeddings
_UpperCamelCase : List[str] = tax_model['target']['token_embedder']['embedding']
_UpperCamelCase : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_UpperCamelCase : List[str] = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(UpperCAmelCase_ )
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
lowerCAmelCase__ = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 648
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
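# The torch-dependent modeling classes below are only registered when PyTorch is available.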
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = KandinskyVaaInpaintPipeline
a__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
a__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
a__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a__ = False
@property
def A__ ( self):
return 32
@property
def A__ ( self):
return 32
@property
def A__ ( self):
return self.time_input_dim
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 1_00
@property
def A__ ( self):
torch.manual_seed(0)
_UpperCamelCase : Optional[Any] = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_UpperCamelCase : Tuple = UNetaDConditionModel(**__snake_case)
return model
@property
def A__ ( self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self):
torch.manual_seed(0)
_UpperCamelCase : int = VQModel(**self.dummy_movq_kwargs)
return model
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.dummy_unet
_UpperCamelCase : List[str] = self.dummy_movq
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type='epsilon' , thresholding=__snake_case , )
_UpperCamelCase : str = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A__ ( self , __snake_case , __snake_case=0):
_UpperCamelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case)).to(__snake_case)
_UpperCamelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
__snake_case)
# create init_image
_UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case)).to(__snake_case)
_UpperCamelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase : Dict = Image.fromarray(np.uinta(__snake_case)).convert('RGB').resize((2_56, 2_56))
# create mask
_UpperCamelCase : Tuple = np.ones((64, 64) , dtype=np.floataa)
_UpperCamelCase : int = 0
if str(__snake_case).startswith('mps'):
_UpperCamelCase : List[str] = torch.manual_seed(__snake_case)
else:
_UpperCamelCase : Tuple = torch.Generator(device=__snake_case).manual_seed(__snake_case)
_UpperCamelCase : Optional[Any] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def A__ ( self):
_UpperCamelCase : List[str] = 'cpu'
_UpperCamelCase : Union[str, Any] = self.get_dummy_components()
_UpperCamelCase : str = self.pipeline_class(**__snake_case)
_UpperCamelCase : int = pipe.to(__snake_case)
pipe.set_progress_bar_config(disable=__snake_case)
_UpperCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(__snake_case))
_UpperCamelCase : List[str] = output.images
_UpperCamelCase : Union[str, Any] = pipe(
**self.get_dummy_inputs(__snake_case) , return_dict=__snake_case , )[0]
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''')
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : List[Any] = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def A__ ( self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
_UpperCamelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy')
_UpperCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
_UpperCamelCase : List[Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : List[str] = 'a hat'
_UpperCamelCase : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa)
pipe_prior.to(__snake_case)
_UpperCamelCase : List[str] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa)
_UpperCamelCase : Union[str, Any] = pipeline.to(__snake_case)
pipeline.set_progress_bar_config(disable=__snake_case)
_UpperCamelCase : int = torch.Generator(device='cpu').manual_seed(0)
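        # the prior pipeline turns the text prompt into the image embeddings consumed by the inpaint decoder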
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_UpperCamelCase : List[Any] = pipeline(
image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
_UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case)
| 648
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Tuple = len(__snake_case)
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Union[str, Any] = out_indices
_UpperCamelCase : int = num_groups
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = BitModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = self.num_labels
_UpperCamelCase : Dict = BitForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_UpperCamelCase : Any = None
_UpperCamelCase : str = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def A__ ( self):
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = BitModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
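            # batch-norm and group-norm layers are expected to start with weight 1 and bias 0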
for name, module in model.named_modules():
if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Any = layer_type
_UpperCamelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
                # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = BitModelTester(self)
| 648
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "gptj"
a__ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __snake_case=5_04_00 , __snake_case=20_48 , __snake_case=40_96 , __snake_case=28 , __snake_case=16 , __snake_case=64 , __snake_case=None , __snake_case="gelu_new" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=1e-5 , __snake_case=0.0_2 , __snake_case=True , __snake_case=5_02_56 , __snake_case=5_02_56 , __snake_case=False , **__snake_case , ):
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[Any] = n_positions
_UpperCamelCase : str = n_embd
_UpperCamelCase : Dict = n_layer
_UpperCamelCase : Any = n_head
_UpperCamelCase : int = n_inner
_UpperCamelCase : Optional[Any] = rotary_dim
_UpperCamelCase : Any = activation_function
_UpperCamelCase : Union[str, Any] = resid_pdrop
_UpperCamelCase : Any = embd_pdrop
_UpperCamelCase : Any = attn_pdrop
_UpperCamelCase : Dict = layer_norm_epsilon
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Optional[int] = use_cache
_UpperCamelCase : int = bos_token_id
_UpperCamelCase : Any = eos_token_id
super().__init__(
bos_token_id=__snake_case , eos_token_id=__snake_case , tie_word_embeddings=__snake_case , **__snake_case)
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case = "default" , __snake_case = None , __snake_case = False , ):
super().__init__(__snake_case , task=__snake_case , patching_specs=__snake_case , use_past=__snake_case)
if not getattr(self._config , 'pad_token_id' , __snake_case):
# TODO: how to do that better?
_UpperCamelCase : Optional[int] = 0
@property
def A__ ( self):
_UpperCamelCase : Tuple = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction='inputs')
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_UpperCamelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def A__ ( self):
return self._config.n_layer
@property
def A__ ( self):
return self._config.n_head
def A__ ( self , __snake_case , __snake_case = -1 , __snake_case = -1 , __snake_case = False , __snake_case = None , ):
_UpperCamelCase : Dict = super(__snake_case , self).generate_dummy_inputs(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case)
        # We need to order the inputs in the way they appear in the forward()
_UpperCamelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_UpperCamelCase : int = seqlen + 2
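                # past key/value tensors have shape (batch, num_heads, past_seq_len, head_dim)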
_UpperCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_UpperCamelCase : Tuple = [
(torch.zeros(__snake_case), torch.zeros(__snake_case)) for _ in range(self.num_layers)
]
_UpperCamelCase : Optional[Any] = common_inputs['attention_mask']
if self.use_past:
_UpperCamelCase : Tuple = ordered_inputs['attention_mask'].dtype
_UpperCamelCase : str = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__snake_case , __snake_case , dtype=__snake_case)] , dim=1)
return ordered_inputs
@property
def A__ ( self):
return 13
| 648
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_66_02_54])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] , UpperCAmelCase_ : int ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : Tuple = initial_vectors
for _ in range(UpperCAmelCase_ ):
_UpperCamelCase : str = iteration_step(UpperCAmelCase_ )
return vectors
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : int = []
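    # each segment is replaced by four: its outer thirds plus the two sides of an
    # equilateral bump obtained by rotating the middle third by 60 degrees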
for i, start_vector in enumerate(vectors[:-1] ):
_UpperCamelCase : Union[str, Any] = vectors[i + 1]
new_vectors.append(UpperCAmelCase_ )
_UpperCamelCase : Tuple = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCamelCase_ ( UpperCAmelCase_ : numpy.ndarray , UpperCAmelCase_ : float ) -> numpy.ndarray:
'''simple docstring'''
_UpperCamelCase : str = numpy.radians(UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = numpy.cos(UpperCAmelCase_ ), numpy.sin(UpperCAmelCase_ )
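    # standard 2D rotation matrix [[cos, -sin], [sin, cos]]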
_UpperCamelCase : Any = numpy.array(((c, -s), (s, c)) )
return numpy.dot(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> None:
'''simple docstring'''
_UpperCamelCase : str = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_UpperCamelCase , _UpperCamelCase : Dict = zip(*UpperCAmelCase_ )
plt.plot(UpperCAmelCase_ , UpperCAmelCase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 648
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bloom"
a__ = ["past_key_values"]
a__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , __snake_case=25_08_80 , __snake_case=64 , __snake_case=2 , __snake_case=8 , __snake_case=1e-5 , __snake_case=0.0_2 , __snake_case=True , __snake_case=1 , __snake_case=2 , __snake_case=False , __snake_case=0.0 , __snake_case=0.0 , __snake_case=1 , __snake_case=False , **__snake_case , ):
_UpperCamelCase : Dict = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCamelCase : Dict = kwargs.pop('n_embed' , __snake_case)
_UpperCamelCase : Optional[int] = hidden_size if n_embed is None else n_embed
_UpperCamelCase : Optional[int] = n_layer
_UpperCamelCase : Tuple = n_head
_UpperCamelCase : Any = layer_norm_epsilon
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : Tuple = use_cache
_UpperCamelCase : Union[str, Any] = pretraining_tp
_UpperCamelCase : List[str] = apply_residual_connection_post_layernorm
_UpperCamelCase : Optional[int] = hidden_dropout
_UpperCamelCase : str = attention_dropout
_UpperCamelCase : List[str] = bos_token_id
_UpperCamelCase : Dict = eos_token_id
_UpperCamelCase : Optional[int] = slow_but_exact
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = version.parse("1.12" )
def __init__( self , __snake_case , __snake_case = "default" , __snake_case = None , __snake_case = False , ):
super().__init__(__snake_case , task=__snake_case , patching_specs=__snake_case , use_past=__snake_case)
if not getattr(self._config , 'pad_token_id' , __snake_case):
# TODO: how to do that better?
_UpperCamelCase : int = 0
@property
def A__ ( self):
_UpperCamelCase : List[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__snake_case , direction='inputs' , inverted_values_shape=__snake_case)
_UpperCamelCase : int = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_UpperCamelCase : Dict = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def A__ ( self):
return self._config.n_layer
@property
def A__ ( self):
return self._config.n_head
@property
def A__ ( self):
return 1e-3
def A__ ( self , __snake_case , __snake_case = -1 , __snake_case = -1 , __snake_case = False , __snake_case = None , ):
_UpperCamelCase : int = super(__snake_case , self).generate_dummy_inputs(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case)
        # We need to order the inputs in the way they appear in the forward()
_UpperCamelCase : Tuple = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
_UpperCamelCase , _UpperCamelCase : int = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_UpperCamelCase : List[str] = seqlen + 2
_UpperCamelCase : Union[str, Any] = self._config.hidden_size // self.num_attention_heads
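                # BLOOM fuses batch and head dims: keys are (batch * num_heads, head_dim, past_len), values are (batch * num_heads, past_len, head_dim)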
_UpperCamelCase : str = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_UpperCamelCase : List[Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_UpperCamelCase : List[Any] = [
(torch.zeros(__snake_case), torch.zeros(__snake_case)) for _ in range(self.num_layers)
]
_UpperCamelCase : Optional[Any] = common_inputs['attention_mask']
if self.use_past:
_UpperCamelCase : List[Any] = ordered_inputs['attention_mask'].dtype
_UpperCamelCase : Optional[int] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__snake_case , __snake_case , dtype=__snake_case)] , dim=1)
return ordered_inputs
@property
def A__ ( self):
return 13
| 648
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCamelCase : str = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(UpperCAmelCase_ , id=UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
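    # pytest returns exit status 5 when no tests were collected; report that as success instead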
if exitstatus == 5:
_UpperCamelCase : List[Any] = 0
# Doctest custom flag to ignore output.
lowerCAmelCase__ = doctest.register_optionflag("""IGNORE_RESULT""")
lowerCAmelCase__ = doctest.OutputChecker
class lowercase ( _lowercase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case , __snake_case):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __snake_case , __snake_case , __snake_case)
lowerCAmelCase__ = CustomOutputChecker
lowerCAmelCase__ = HfDoctestModule
lowerCAmelCase__ = HfDocTestParser
| 648
| 1
|
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
while a != 0:
_UpperCamelCase , _UpperCamelCase : List[str] = b % a, a
return b
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
if gcd(UpperCAmelCase_ , UpperCAmelCase_ ) != 1:
_UpperCamelCase : Dict = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(UpperCAmelCase_ )
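    # extended Euclidean algorithm: iterate on (u1, u2, u3) and (v1, v2, v3),
    # maintaining the invariant u3 == u1 * a + u2 * m until v3 reaches 0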
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = 1, 0, a
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = 0, 1, m
while va != 0:
_UpperCamelCase : Any = ua // va
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 648
|
lowerCAmelCase__ = range(2, 2_0 + 1)
lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
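# memo caches previously computed jumps, keyed first by digitsum(b) and then by the low-digit value c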
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) )
_UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) )
_UpperCamelCase , _UpperCamelCase : Dict = 0, 0
_UpperCamelCase : Optional[int] = n - i
_UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ )
if sub_memo is not None:
_UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ )
if jumps is not None and len(UpperCAmelCase_ ) > 0:
# find and make the largest jump without going over
_UpperCamelCase : str = -1
for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCamelCase : Optional[Any] = _k
break
if max_jump >= 0:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCamelCase : Tuple = diff + c
for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ):
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
if new_c > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Union[str, Any] = []
else:
_UpperCamelCase : List[Any] = {c: []}
_UpperCamelCase : Optional[int] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCamelCase , _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCamelCase , _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
_UpperCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCamelCase : Union[str, Any] = 0
while j < len(UpperCAmelCase_ ):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) )
return (diff, dn)
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(UpperCAmelCase_ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_UpperCamelCase : Any = i
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = 0, 0, 0
for j in range(len(UpperCAmelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_UpperCamelCase : Union[str, Any] = ds_c + ds_b
diff += addend
_UpperCamelCase : Union[str, Any] = 0
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = a_i[j] + addend
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return diff, i - start_i
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ):
_UpperCamelCase : List[str] = digits[j] + addend
if s >= 1_0:
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
_UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient
else:
_UpperCamelCase : Dict = s
_UpperCamelCase : Optional[Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
digits.append(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = [1]
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : int = 0
while True:
_UpperCamelCase , _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ )
dn += terms_jumped
if dn == n - i:
break
_UpperCamelCase : str = 0
for j in range(len(UpperCAmelCase_ ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
| 648
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple ) -> List[str]:
'''simple docstring'''
if "resnet-50" in model_name:
_UpperCamelCase : List[Any] = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
_UpperCamelCase : Any = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
        raise ValueError('Model name should include either resnet-50 or resnet-101' )
_UpperCamelCase : List[str] = DetrConfig(use_timm_backbone=UpperCAmelCase_ , backbone_config=UpperCAmelCase_ )
# set label attributes
_UpperCamelCase : int = 'panoptic' in model_name
if is_panoptic:
_UpperCamelCase : List[str] = 2_5_0
else:
_UpperCamelCase : Tuple = 9_1
_UpperCamelCase : List[Any] = 'huggingface/label-files'
_UpperCamelCase : Tuple = 'coco-detection-id2label.json'
_UpperCamelCase : int = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) , 'r' ) )
_UpperCamelCase : Dict = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : Any = idalabel
_UpperCamelCase : List[str] = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase : int = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
_UpperCamelCase : List[str] = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : Dict = val
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]=False ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : str = ''
if is_panoptic:
_UpperCamelCase : Any = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_UpperCamelCase : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_UpperCamelCase : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
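        # the fused in_proj matrix stacks q, k and v along dim 0 in chunks of 256 (the hidden size)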
_UpperCamelCase : str = in_proj_weight[:2_5_6, :]
_UpperCamelCase : List[str] = in_proj_bias[:2_5_6]
_UpperCamelCase : List[str] = in_proj_weight[2_5_6:5_1_2, :]
_UpperCamelCase : List[Any] = in_proj_bias[2_5_6:5_1_2]
_UpperCamelCase : str = in_proj_weight[-2_5_6:, :]
_UpperCamelCase : str = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_UpperCamelCase : Optional[int] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_UpperCamelCase : Optional[Any] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[str] = in_proj_weight[:2_5_6, :]
_UpperCamelCase : Tuple = in_proj_bias[:2_5_6]
_UpperCamelCase : Union[str, Any] = in_proj_weight[2_5_6:5_1_2, :]
_UpperCamelCase : List[Any] = in_proj_bias[2_5_6:5_1_2]
_UpperCamelCase : List[Any] = in_proj_weight[-2_5_6:, :]
_UpperCamelCase : List[Any] = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
_UpperCamelCase : List[str] = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_UpperCamelCase : Any = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_UpperCamelCase : List[str] = in_proj_weight_cross_attn[:2_5_6, :]
_UpperCamelCase : str = in_proj_bias_cross_attn[:2_5_6]
_UpperCamelCase : Tuple = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
_UpperCamelCase : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2]
_UpperCamelCase : Union[str, Any] = in_proj_weight_cross_attn[-2_5_6:, :]
_UpperCamelCase : str = in_proj_bias_cross_attn[-2_5_6:]
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
_UpperCamelCase : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCamelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[Any]=False ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = get_detr_config(UpperCAmelCase_ )
# load original model from torch hub
_UpperCamelCase : Dict = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(F'''Converting model {model_name}...''' )
_UpperCamelCase : List[str] = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase_ ).eval()
_UpperCamelCase : Optional[Any] = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCAmelCase_ ):
if is_panoptic:
_UpperCamelCase : List[Any] = 'detr.' + src
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase_ , is_panoptic=UpperCAmelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_UpperCamelCase : Dict = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_UpperCamelCase : Union[str, Any] = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_UpperCamelCase : List[Any] = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_UpperCamelCase : str = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_UpperCamelCase : Union[str, Any] = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
_UpperCamelCase : Tuple = DetrForSegmentation(UpperCAmelCase_ ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
model.eval()
# verify our conversion on an image
_UpperCamelCase : int = 'coco_panoptic' if is_panoptic else 'coco_detection'
_UpperCamelCase : Optional[Any] = DetrImageProcessor(format=UpperCAmelCase_ )
_UpperCamelCase : List[Any] = processor(images=prepare_img() , return_tensors='pt' )
_UpperCamelCase : str = encoding['pixel_values']
_UpperCamelCase : Optional[Any] = detr(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = model(UpperCAmelCase_ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
lowerCAmelCase__ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "vit_mae"
def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = qkv_bias
_UpperCamelCase : str = decoder_num_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers
_UpperCamelCase : Any = decoder_intermediate_size
_UpperCamelCase : int = mask_ratio
_UpperCamelCase : List[Any] = norm_pix_loss
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=False , __snake_case=True , __snake_case="None" , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : Optional[int] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : Optional[int] = seq_length
_UpperCamelCase : Union[str, Any] = is_training
_UpperCamelCase : Any = use_input_mask
_UpperCamelCase : Dict = use_token_type_ids
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : int = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Tuple = type_sequence_label_size
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : str = num_labels
_UpperCamelCase : str = num_choices
_UpperCamelCase : Optional[Any] = relative_attention
_UpperCamelCase : str = position_biased_input
_UpperCamelCase : Any = pos_att_type
_UpperCamelCase : Optional[Any] = scope
def A__ ( self):
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : List[str] = None
if self.use_input_mask:
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
_UpperCamelCase : List[str] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[Any] = None
_UpperCamelCase : List[Any] = None
if self.use_labels:
_UpperCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def A__ ( self , __snake_case):
self.parent.assertListEqual(list(result.loss.size()) , [])
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = DebertaVaModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)[0]
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)[0]
_UpperCamelCase : List[Any] = model(__snake_case)[0]
self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = DebertaVaForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Any = DebertaVaForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
self.check_loss_output(__snake_case)
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = self.num_labels
_UpperCamelCase : Union[str, Any] = DebertaVaForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = DebertaVaForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = DebertaVaForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = DebertaVaModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__snake_case)
@slow
def A__ ( self):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = DebertaVaModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet')
def A__ ( self):
pass
@slow
def A__ ( self):
_UpperCamelCase : Dict = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')
_UpperCamelCase : List[str] = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
_UpperCamelCase : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_UpperCamelCase : List[Any] = model(__snake_case , attention_mask=__snake_case)[0]
# compare the actual values for a slice.
_UpperCamelCase : int = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1e-4) , f'''{output[:, 1:4, 1:4]}''')
import functools
def lowerCamelCase_ ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase_ ) != 3 or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase_ ) == 0:
return 0
if min(UpperCAmelCase_ ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase_ ) >= 3_6_6:
raise ValueError('All days elements should be less than 366' )
_UpperCamelCase : Union[str, Any] = set(UpperCAmelCase_ )
@functools.cache
def dynamic_programming(UpperCAmelCase_ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
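# A worked example with hypothetical prices, as a minimal sanity check of the
# dynamic programming above: travelling on days [1, 4, 6, 7, 8, 20] with pass
# costs [2, 7, 15] (1-day, 7-day, 30-day) is cheapest with one 7-day pass for
# days 1-7 plus 1-day passes for days 8 and 20, i.e. 7 + 2 + 2 = 11:
# assert lowerCamelCase_([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11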
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCAmelCase__ = pd.read_csv("""sample_data.csv""", header=None)
lowerCAmelCase__ = df.shape[:1][0]
# If you're using some other dataset, input the target column
lowerCAmelCase__ = df.iloc[:, 1:2]
lowerCAmelCase__ = actual_data.values.reshape(len_data, 1)
lowerCAmelCase__ = MinMaxScaler().fit_transform(actual_data)
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 5
lowerCAmelCase__ = 2_0
lowerCAmelCase__ = len_data - periods * look_back
lowerCAmelCase__ = actual_data[:division]
lowerCAmelCase__ = actual_data[division - look_back :]
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ , lowerCAmelCase__ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCAmelCase__ = np.array(train_x)
lowerCAmelCase__ = np.array(test_x)
lowerCAmelCase__ = np.array([list(i.ravel()) for i in train_y])
lowerCAmelCase__ = np.array([list(i.ravel()) for i in test_y])
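# Shape note for the windows built above: with look_back = 10 and
# forward_days = 5, each x_train sample is a (10, 1) slice of past values and
# each y_train row holds the next 5 values, predicted in a single shot below.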
lowerCAmelCase__ = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
lowerCAmelCase__ = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
lowerCAmelCase__ = model.predict(x_test)
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=torch_device , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = None
a__ = BloomTokenizerFast
a__ = BloomTokenizerFast
a__ = True
a__ = False
a__ = "tokenizer_file"
a__ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def A__ ( self):
super().setUp()
_UpperCamelCase : int = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self , **__snake_case):
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer()
_UpperCamelCase : List[Any] = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
_UpperCamelCase : List[Any] = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
_UpperCamelCase : Optional[int] = tokenizer.batch_encode_plus(__snake_case)['input_ids']
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Dict = tokenizer.batch_decode(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self , __snake_case=6):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCamelCase : Union[str, Any] = 'This is a simple input'
_UpperCamelCase : Tuple = ['This is a simple input 1', 'This is a simple input 2']
_UpperCamelCase : Dict = ('This is a simple input', 'This is a pair')
_UpperCamelCase : int = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(__snake_case , max_length=__snake_case)
tokenizer_r.encode_plus(__snake_case , max_length=__snake_case)
tokenizer_r.batch_encode_plus(__snake_case , max_length=__snake_case)
tokenizer_r.encode(__snake_case , max_length=__snake_case)
tokenizer_r.batch_encode_plus(__snake_case , max_length=__snake_case)
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding')
tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='max_length')
# Simple input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='max_length')
# Simple input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='max_length' , )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='max_length')
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='max_length')
# Pair input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='max_length' , )
def A__ ( self):
_UpperCamelCase : str = self.get_rust_tokenizer()
_UpperCamelCase : List[str] = load_dataset('xnli' , 'all_languages' , split='test' , streaming=__snake_case)
_UpperCamelCase : List[str] = next(iter(__snake_case))['premise'] # pick up one data
_UpperCamelCase : List[Any] = list(sample_data.values())
_UpperCamelCase : int = list(map(tokenizer.encode , __snake_case))
_UpperCamelCase : Tuple = [tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case) for x in output_tokens]
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
# any sequence length constraints. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
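# Resulting layout, the standard XLM-R format: a single sequence becomes
# <s> A </s> and a pair of sequences becomes <s> A </s></s> B </s>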
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if __snake_case in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[__snake_case]
spm_id = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if __snake_case in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[__snake_case]
return self.sp_model.IdToPiece(__snake_case - self.fairseq_offset)
def A__ ( self , __snake_case):
out_string = ''.join(__snake_case).replace('▁' , ' ').strip()
return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 3_2
def lowerCamelCase_ ( UpperCAmelCase_ : Accelerator , UpperCAmelCase_ : int = 1_6 , UpperCAmelCase_ : str = "bert-base-cased" ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase_ : Any ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCamelCase : Any = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCamelCase : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' )
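# Padding to a fixed max_length keeps every batch the same (batch_size, 128)
# shape, which avoids costly XLA recompilation on TPU; 'longest' instead pads
# each batch only up to its longest sample, which is cheaper on GPU/CPU.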
# Instantiate dataloaders.
_UpperCamelCase : Any = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
_UpperCamelCase : List[str] = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCamelCase : int = 0
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCamelCase : List[Any] = model(**UpperCAmelCase_ )
_UpperCamelCase : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
_UpperCamelCase , _UpperCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_UpperCamelCase : str = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
_UpperCamelCase : Optional[Any] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase : Any = config['lr']
_UpperCamelCase : List[str] = int(config['num_epochs'] )
_UpperCamelCase : Optional[int] = int(config['seed'] )
_UpperCamelCase : List[Any] = int(config['batch_size'] )
_UpperCamelCase : Optional[Any] = args.model_name_or_path
set_seed(UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
# Instantiate optimizer
_UpperCamelCase : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCamelCase : Dict = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
_UpperCamelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = (len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCamelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase_ , )
else:
_UpperCamelCase : List[Any] = DummyScheduler(UpperCAmelCase_ , total_num_steps=UpperCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
_UpperCamelCase : Optional[int] = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Any = evaluate.load('glue' , 'mrpc' )
_UpperCamelCase : int = num_epochs
if args.partial_train_epoch is not None:
_UpperCamelCase : int = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_UpperCamelCase : str = args.resume_from_checkpoint.split('epoch_' )[1]
_UpperCamelCase : List[str] = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_UpperCamelCase : Any = int(UpperCAmelCase_ ) + 1
_UpperCamelCase : Optional[Any] = evaluation_loop(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
accelerator.print('resumed checkpoint performance:' , UpperCAmelCase_ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , 'r' ) as f:
_UpperCamelCase : Dict = json.load(UpperCAmelCase_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_UpperCamelCase : int = {}
for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ):
model.train()
for step, batch in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = model(**UpperCAmelCase_ )
_UpperCamelCase : int = outputs.loss
_UpperCamelCase : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_UpperCamelCase : int = F'''epoch_{epoch}'''
_UpperCamelCase : Tuple = os.path.join(args.output_dir , UpperCAmelCase_ )
accelerator.save_state(UpperCAmelCase_ )
_UpperCamelCase : Any = evaluation_loop(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : int = accuracy
_UpperCamelCase : Tuple = lr_scheduler.get_lr()[0]
_UpperCamelCase : int = optimizer.param_groups[0]['lr']
_UpperCamelCase : int = epoch
_UpperCamelCase : List[Any] = overall_step
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
_UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase_ , default=2 , help='Number of train epochs.' , )
_UpperCamelCase : Optional[int] = parser.parse_args()
_UpperCamelCase : Union[str, Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : list[float] ) -> float:
'''simple docstring'''
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
_UpperCamelCase : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(UpperCAmelCase_ ) )
return round(UpperCAmelCase_ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
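# A worked example with hypothetical figures: an initial outlay of -100
# followed by inflows of 60 and 70 at a 10% discount rate gives
# -100 + 60/1.1 + 70/1.21 = 12.396..., so
# lowerCamelCase_(0.10, [-100, 60, 70]) == 12.4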
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0 , UpperCAmelCase_ : int = 1_0_0_0 , UpperCAmelCase_ : bool = True ) -> int:
'''simple docstring'''
assert (
isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
return min_val if option else max_val
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> None:
'''simple docstring'''
assert (
isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('argument value for lower and higher must be(lower > higher)' )
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value' )
def answer(UpperCAmelCase_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...' )
_UpperCamelCase : Optional[int] = lower
_UpperCamelCase : int = higher
_UpperCamelCase : List[str] = []
while True:
_UpperCamelCase : int = get_avg(UpperCAmelCase_ , UpperCAmelCase_ )
last_numbers.append(UpperCAmelCase_ )
if answer(UpperCAmelCase_ ) == "low":
_UpperCamelCase : List[str] = number
elif answer(UpperCAmelCase_ ) == "high":
_UpperCamelCase : int = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
_UpperCamelCase : Dict = int(input('Enter lower value : ' ).strip() )
_UpperCamelCase : List[Any] = int(input('Enter high value : ' ).strip() )
_UpperCamelCase : Dict = int(input('Enter value to guess : ' ).strip() )
guess_the_number(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
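# Example (hypothetical run): guess_the_number(0, 1_000, 17) repeatedly
# bisects the range via get_avg and ends by printing
# "guess the number : 17" together with the list of intermediate guesses.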
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
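    # A reading aid for the mapping above: the dict values are ONNX dynamic
    # axes. Axis 0 is always the batch dimension and axis 1 the token sequence;
    # for multiple-choice tasks an extra "choice" axis sits between them, so the
    # exported graph never hard-codes batch size, choice count, or sequence length.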
| 648
| 1
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCamelCase_ ( UpperCAmelCase_ : NDArray[floataa] , UpperCAmelCase_ : NDArray[floataa] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , ) -> list[float]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = coefficient_matrix.shape
_UpperCamelCase , _UpperCamelCase : Optional[Any] = constant_matrix.shape
if rowsa != colsa:
_UpperCamelCase : Any = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(UpperCAmelCase_ )
if colsa != 1:
_UpperCamelCase : str = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(UpperCAmelCase_ )
if rowsa != rowsa:
_UpperCamelCase : int = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
F'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) != rowsa:
_UpperCamelCase : Optional[int] = (
'Number of initial values must be equal to number of rows in coefficient '
F'''matrix but received {len(UpperCAmelCase_ )} and {rowsa}'''
)
raise ValueError(UpperCAmelCase_ )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
_UpperCamelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
_UpperCamelCase , _UpperCamelCase : int = table.shape
strictly_diagonally_dominant(UpperCAmelCase_ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCAmelCase_ ):
_UpperCamelCase : str = []
for row in range(UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = 0
for col in range(UpperCAmelCase_ ):
if col == row:
_UpperCamelCase : Union[str, Any] = table[row][col]
elif col == cols - 1:
_UpperCamelCase : str = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
_UpperCamelCase : Dict = (temp + val) / denom
new_val.append(UpperCAmelCase_ )
_UpperCamelCase : Tuple = new_val
return [float(UpperCAmelCase_ ) for i in new_val]
def lowerCamelCase_ ( UpperCAmelCase_ : NDArray[floataa] ) -> bool:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : List[Any] = table.shape
_UpperCamelCase : Any = True
for i in range(0 , UpperCAmelCase_ ):
_UpperCamelCase : Any = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
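# A minimal worked example of the Jacobi update implemented above, restated in
# vectorized form with illustrative names. The system is strictly diagonally
# dominant, so the iteration is guaranteed to converge:
#
#   4x +  y = 9
#    x + 3y = 7        exact solution: x = 20/11 ~ 1.818, y = 19/11 ~ 1.727
def _jacobi_sketch(a, b, xa, iterations):
    # x_{k+1}[i] = (b[i] - sum over j != i of a[i][j] * x_k[j]) / a[i][i]
    x = np.array(xa, dtype=float)
    for _ in range(iterations):
        x = (b - (a @ x - np.diag(a) * x)) / np.diag(a)
    return x
# _jacobi_sketch(np.array([[4.0, 1.0], [1.0, 3.0]]), np.array([9.0, 7.0]),
#                [0.0, 0.0], 25) -> approximately [1.8181..., 1.7272...]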
| 648
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
_UpperCamelCase : Tuple = int(__snake_case)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
_UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
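# The tool above frames zero-shot classification as NLI: each candidate label
# becomes the hypothesis "This example is {label}" and labels are ranked by the
# entailment logit. A minimal sketch of the same idea through the standard
# pipeline API (requires downloading the checkpoint; output is illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   result = classifier("I loved this film!", candidate_labels=["positive", "negative"])
#   result["labels"][0]   # -> "positive", the most likely label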
| 648
| 1
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=30 , __snake_case=2 , __snake_case=3 , __snake_case=True , __snake_case=True , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=10 , __snake_case=0.0_2 , __snake_case=None , __snake_case=2 , ):
_UpperCamelCase : Dict = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : int = image_size
_UpperCamelCase : Optional[Any] = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : List[str] = use_labels
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = type_sequence_label_size
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : Dict = scope
_UpperCamelCase : List[str] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Dict = (image_size // patch_size) ** 2
_UpperCamelCase : Dict = num_patches + 1
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : int = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = ViTModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = ViTForMaskedImageModeling(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCamelCase : Dict = 1
_UpperCamelCase : Optional[Any] = ViTForMaskedImageModeling(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : Union[str, Any] = model(__snake_case)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = self.type_sequence_label_size
_UpperCamelCase : Optional[int] = ViTForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCamelCase : Dict = 1
_UpperCamelCase : List[Any] = ViTForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : Union[str, Any] = model(__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A__ ( self):
_UpperCamelCase : Any = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = config_and_inputs
_UpperCamelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
a__ = True
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Any = ViTModelTester(self)
_UpperCamelCase : Any = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear))
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Tuple = model_class(__snake_case)
_UpperCamelCase : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Any = ViTModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def A__ ( self):
_UpperCamelCase : List[str] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(__snake_case)
_UpperCamelCase : Optional[int] = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : Optional[Any] = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : List[str] = model(**__snake_case)
# verify the logits
_UpperCamelCase : Optional[int] = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : int = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@slow
def A__ ( self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # which interpolates the pre-trained position embeddings so the model can be
        # used at higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher-resolution images.
_UpperCamelCase : Tuple = ViTModel.from_pretrained('facebook/dino-vits8').to(__snake_case)
_UpperCamelCase : Optional[int] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_80)
_UpperCamelCase : Tuple = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__snake_case , return_tensors='pt')
_UpperCamelCase : str = inputs.pixel_values.to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(__snake_case , interpolate_pos_encoding=__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 36_01, 3_84))
self.assertEqual(outputs.last_hidden_state.shape , __snake_case)
_UpperCamelCase : Any = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self):
_UpperCamelCase : Any = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
_UpperCamelCase : Union[str, Any] = self.default_image_processor
_UpperCamelCase : Union[str, Any] = prepare_img()
_UpperCamelCase : Tuple = image_processor(images=__snake_case , return_tensors='pt')
_UpperCamelCase : List[Any] = inputs.pixel_values.to(__snake_case)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)
| 648
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
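# A reading aid for the pattern above: this is the standard transformers
# lazy-import scheme. Under TYPE_CHECKING the real symbols are imported so
# static tooling can resolve them, while at runtime (in the unmangled original)
# sys.modules[__name__] is replaced by a _LazyModule that defers each heavy
# torch/TF import until the corresponding attribute is first accessed.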
| 648
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "falcon"
a__ = ["past_key_values"]
def __init__( self , __snake_case=6_50_24 , __snake_case=45_44 , __snake_case=32 , __snake_case=71 , __snake_case=1e-5 , __snake_case=0.0_2 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=None , __snake_case=False , __snake_case=False , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=11 , __snake_case=11 , **__snake_case , ):
_UpperCamelCase : List[Any] = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCamelCase : Tuple = kwargs.pop('n_embed' , __snake_case)
_UpperCamelCase : Union[str, Any] = hidden_size if n_embed is None else n_embed
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Optional[Any] = layer_norm_epsilon
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : List[str] = hidden_dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : Union[str, Any] = eos_token_id
_UpperCamelCase : str = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCamelCase : List[Any] = alibi
_UpperCamelCase : Union[str, Any] = new_decoder_architecture
_UpperCamelCase : str = multi_query # Ignored when new_decoder_architecture is True
_UpperCamelCase : int = parallel_attn
_UpperCamelCase : List[str] = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
@property
def A__ ( self):
return self.hidden_size // self.num_attention_heads
@property
def A__ ( self):
return not self.alibi
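    # A quick check of the two properties above with this config's defaults:
    # head_dim = hidden_size // num_attention_heads = 4544 // 71 = 64, and
    # rotary position embeddings are used exactly when alibi is disabled
    # (alibi defaults to False here, so rotary is True).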
| 648
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 1
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowercase :
"""simple docstring"""
a__ = 42 # [batch_size x 3]
a__ = 42 # [batch_size x 3]
a__ = 42 # [batch_size x 3]
a__ = 42 # [batch_size x 3]
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
def A__ ( self):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
def A__ ( self):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa))
def A__ ( self):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa))
def A__ ( self):
_UpperCamelCase : Optional[int] = torch.arange(self.height * self.width)
_UpperCamelCase : Optional[Any] = torch.stack(
[
pixel_indices % self.width,
torch.div(__snake_case , self.width , rounding_mode='trunc'),
] , axis=1 , )
return coords
@property
def A__ ( self):
_UpperCamelCase , *_UpperCamelCase : str = self.shape
_UpperCamelCase : Any = int(np.prod(__snake_case))
_UpperCamelCase : Union[str, Any] = self.get_image_coords()
_UpperCamelCase : str = torch.broadcast_to(coords.unsqueeze(0) , [batch_size * inner_batch_size, *coords.shape])
_UpperCamelCase : List[Any] = self.get_camera_rays(__snake_case)
_UpperCamelCase : Tuple = rays.view(__snake_case , inner_batch_size * self.height * self.width , 2 , 3)
return rays
def A__ ( self , __snake_case):
_UpperCamelCase , *_UpperCamelCase , _UpperCamelCase : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_UpperCamelCase : str = coords.view(__snake_case , -1 , 2)
_UpperCamelCase : Optional[Any] = self.resolution()
_UpperCamelCase : str = self.fov()
_UpperCamelCase : int = (flat.float() / (res - 1)) * 2 - 1
_UpperCamelCase : str = fracs * torch.tan(fov / 2)
_UpperCamelCase : Optional[Any] = fracs.view(__snake_case , -1 , 2)
_UpperCamelCase : Any = (
self.z.view(__snake_case , 1 , 3)
+ self.x.view(__snake_case , 1 , 3) * fracs[:, :, :1]
+ self.y.view(__snake_case , 1 , 3) * fracs[:, :, 1:]
)
_UpperCamelCase : int = directions / directions.norm(dim=-1 , keepdim=__snake_case)
_UpperCamelCase : Union[str, Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(__snake_case , 1 , 3) , [batch_size, directions.shape[1], 3]),
directions,
] , dim=2 , )
return rays.view(__snake_case , *__snake_case , 2 , 3)
def A__ ( self , __snake_case , __snake_case):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> DifferentiableProjectiveCamera:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Tuple = []
_UpperCamelCase : Tuple = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
_UpperCamelCase : Optional[Any] = np.array([np.sin(UpperCAmelCase_ ), np.cos(UpperCAmelCase_ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_UpperCamelCase : List[Any] = -z * 4
_UpperCamelCase : Dict = np.array([np.cos(UpperCAmelCase_ ), -np.sin(UpperCAmelCase_ ), 0.0] )
_UpperCamelCase : int = np.cross(UpperCAmelCase_ , UpperCAmelCase_ )
origins.append(UpperCAmelCase_ )
xs.append(UpperCAmelCase_ )
ys.append(UpperCAmelCase_ )
zs.append(UpperCAmelCase_ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(UpperCAmelCase_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCAmelCase_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCAmelCase_ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCAmelCase_ , axis=0 ) ).float() , width=UpperCAmelCase_ , height=UpperCAmelCase_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCAmelCase_ )) , )
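# A minimal usage sketch of the factory above (names reconstruct the mangled
# identifiers and are illustrative; `size` is the square image resolution):
#
#   cameras = create_pan_cameras(64)
#   cameras.camera_rays.shape   # -> torch.Size([1, 20 * 64 * 64, 2, 3])
#
# i.e. 20 poses evenly spaced on a circle around the origin, with one
# (origin, direction) pair per pixel of each 64x64 view.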
| 648
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
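        # A reading aid for the test above: for multilingual S2T checkpoints the
        # tokenizer prepends the target-language code as a forced prefix token,
        # so switching the target language from "fr" to "es" swaps FR_CODE for
        # ES_CODE at position 0 while the rest of the encoding is unchanged.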
| 648
| 1
|
from itertools import product
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> list[int]:
'''simple docstring'''
_UpperCamelCase : int = sides_number
_UpperCamelCase : List[Any] = max_face_number * dice_number
_UpperCamelCase : Any = [0] * (max_total + 1)
_UpperCamelCase : Dict = 1
_UpperCamelCase : int = range(UpperCAmelCase_ , max_face_number + 1 )
for dice_numbers in product(UpperCAmelCase_ , repeat=UpperCAmelCase_ ):
_UpperCamelCase : str = sum(UpperCAmelCase_ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCamelCase_ ( ) -> float:
'''simple docstring'''
_UpperCamelCase : Optional[int] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
_UpperCamelCase : Optional[Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
_UpperCamelCase : Dict = 0
_UpperCamelCase : Tuple = 9
_UpperCamelCase : int = 4 * 9
_UpperCamelCase : Union[str, Any] = 6
for peter_total in range(UpperCAmelCase_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCamelCase : int = (4**9) * (6**6)
_UpperCamelCase : Tuple = peter_wins_count / total_games_number
_UpperCamelCase : str = round(UpperCAmelCase_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
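# A compact cross-check of the frequency-counting solution above: the win
# probability can equally be written as a double sum over the two sum
# distributions (helper names below are illustrative):
def _pe205_sketch():
    def dist(sides, dice):
        freq = {}
        for roll in product(range(1, sides + 1), repeat=dice):
            s = sum(roll)
            freq[s] = freq.get(s, 0) + 1
        return freq

    peter, colin = dist(4, 9), dist(6, 6)
    wins = sum(p * c for sp, p in peter.items() for sc, c in colin.items() if sp > sc)
    return round(wins / (4**9 * 6**6), 7)
# _pe205_sketch() -> 0.5731441, matching solution() above.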
| 648
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
| 648
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case):
_UpperCamelCase : Tuple = value
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case):
_UpperCamelCase : Optional[Any] = tree
def A__ ( self , __snake_case):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left) + self.depth_first_search(node.right)
)
def __iter__( self):
yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
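# The iterator above yields a single value: the sum of every node reached by
# the recursive depth-first walk. A self-contained example with illustrative
# names (both classes above are mangled to the same placeholder):
class _Node:
    def __init__(self, value):
        self.value, self.left, self.right = value, None, None

def _dfs_sum(node):
    if node is None:
        return 0
    return node.value + _dfs_sum(node.left) + _dfs_sum(node.right)

_root = _Node(10)
_root.left, _root.right = _Node(5), _Node(-3)
assert _dfs_sum(_root) == 12  # 10 + 5 - 3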
| 648
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
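        # What the two allclose checks pin down: DDPMScheduler.add_noise and
        # DDIMScheduler.add_noise share the same forward-diffusion formula,
        #     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
        # so with identical seeds, batches, and timesteps the two training loops
        # must produce numerically identical losses (verified here to atol=1e-5).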
| 648
| 1
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
lowerCAmelCase__ = namedtuple("""covid_data""", """cases deaths recovered""")
def lowerCamelCase_ ( UpperCAmelCase_ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
'''simple docstring'''
_UpperCamelCase : List[str] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(UpperCAmelCase_ ).content ).xpath(UpperCAmelCase_ ) )
lowerCAmelCase__ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
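# Note: the XPath above selects the three "maincounter-number" spans in page
# order (cases, deaths, recovered), which is why the namedtuple fields can be
# filled positionally via the * unpacking.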
| 648
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
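    # Example invocation of this conversion script (file names are
    # illustrative; the checkpoint name drives both config branches above):
    #
    #   python convert_consistency_to_diffusers.py \
    #       --unet_path cd_imagenet64_l2.pt \
    #       --dump_path ./consistency-model-imagenet64 \
    #       --class_cond True
    #
    # "imagenet64" in the name selects IMAGENET_64_UNET_CONFIG, and "cd"
    # selects the consistency-distillation scheduler config CD_SCHEDULER_CONFIG.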
| 648
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
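# With the lazy structure above, `from transformers import TransfoXLModel`
# does not pull in torch-dependent modules until the attribute is first
# accessed (a sketch of the intended behaviour of `_LazyModule`, assuming the
# usual transformers packaging).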
def heaps(arr: list) -> list:
    """
    Pure Python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
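# Expected behaviour (illustrative doctest-style check): Heap's algorithm
# yields all n! permutations, e.g.
#   sorted(heaps([1, 2, 3])) == [(1, 2, 3), (1, 3, 2), (2, 1, 3),
#                                (2, 3, 1), (3, 1, 2), (3, 2, 1)]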
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF structure.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
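# Example of the key mapping performed by `rename_state_dict_key` above
# (illustrative key, not from a real checkpoint):
#   "encoder.layers.0.attention.q_lin.weight"
#       -> "encoder.layers.0.self_attn.q_proj.weight"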
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : List[Any] = tokenizer
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case)
_UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer(__snake_case)
_UpperCamelCase : Dict = self.bert(**__snake_case)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
_UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_UpperCamelCase : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case)
_UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences)
_UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : int = Path(__snake_case) / 'saved.model'
model.save(__snake_case)
_UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case)
_UpperCamelCase : int = loaded_model(__snake_case)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transforms a snake_case string to camelCase (or PascalCase if
    `use_pascal` is True).
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
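# Illustrative behaviour (assumed examples):
#   snake_to_camel_case("some_random_string")                   # 'someRandomString'
#   snake_to_camel_case("some_random_string", use_pascal=True)  # 'SomeRandomString'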
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture image used by the slow test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
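# A hypothetical subclass sketch (names are illustrative, not from the
# original source) showing how the two abstract hooks cooperate:
# `register_subcommand` wires the command into the CLI parser, `run`
# executes it.
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("hello")
#           sub.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")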
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch snowflake construction `steps` times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every edge with the four shorter edges of the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake polyline with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
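# Each iteration replaces every edge with four shorter ones, so after k steps
# the initial 3 edges become 3 * 4**k (3072 edges for the 5 iterations drawn
# above); the bump direction comes from the 60-degree rotation in `rotate`.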
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
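# Illustrative use of the custom doctest flag registered above (assumed
# example, not from the original file):
#
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
#   0.8444218515250481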
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns the count of all n-digit positive integers which are also an
    nth power.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
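# Worked example: 16807 = 7**5 is a 5-digit fifth power, so it is counted.
# Bases of 10 or more never qualify, since 10**n already has n + 1 digits,
# which is why the search stops at base 9.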
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Advance a_i in place, either to the n-th term or to the smallest term for
    which the low-order part overflows 10**k, when terms are written as
    a(i) = b * 10**k + c. Returns (difference added, number of terms skipped).
    """
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """Compute sequential terms in place until a carry past index k or term n."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add addend to the little-endian digit array, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence a(i) = a(i-1) + digitsum(a(i-1))."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
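# The underlying sequence starts 1, 2, 4, 8, 16, 23, 28, 38, ... (each term
# adds the digit sum of its predecessor); the memoised "jumps" above skip huge
# runs of such steps at once, which is what makes n = 10**15 tractable.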
import math


def jump_search(arr: list, x: int) -> int:
    """Find the index of x in a sorted arr with jump search, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
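# Illustrative call (assumed example): in the sorted list below, 55 sits at
# index 10, so jump_search returns 10.
#   jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144], 55)  # -> 10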
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
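# Minimal usage sketch (illustrative, not part of the original module): any
# of the fields above can be overridden via keyword arguments, e.g.
#   config = ViTMAEConfig(mask_ratio=0.6, decoder_num_hidden_layers=4)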
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=32 , __snake_case=2 , __snake_case=3 , __snake_case=16 , __snake_case=[1, 2, 1] , __snake_case=[2, 2, 4] , __snake_case=2 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=True , __snake_case=0.0_2 , __snake_case=1e-5 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=10 , __snake_case=8 , ):
_UpperCamelCase : int = parent
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Optional[Any] = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Tuple = num_channels
_UpperCamelCase : Union[str, Any] = embed_dim
_UpperCamelCase : Dict = depths
_UpperCamelCase : Tuple = num_heads
_UpperCamelCase : Dict = window_size
_UpperCamelCase : List[Any] = mlp_ratio
_UpperCamelCase : Optional[Any] = qkv_bias
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Optional[Any] = drop_path_rate
_UpperCamelCase : int = hidden_act
_UpperCamelCase : List[Any] = use_absolute_embeddings
_UpperCamelCase : Tuple = patch_norm
_UpperCamelCase : str = layer_norm_eps
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : List[str] = scope
_UpperCamelCase : int = use_labels
_UpperCamelCase : List[str] = type_sequence_label_size
_UpperCamelCase : List[Any] = encoder_stride
def A__ ( self):
_UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : Optional[Any] = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = SwinvaModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
_UpperCamelCase : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_UpperCamelCase : Tuple = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = SwinvaForMaskedImageModeling(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Tuple = SwinvaForMaskedImageModeling(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : Dict = model(__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Union[str, Any] = self.type_sequence_label_size
_UpperCamelCase : List[str] = SwinvaForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Union[str, Any] = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = config_and_inputs
_UpperCamelCase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
a__ = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Optional[Any] = SwinvaModelTester(self)
_UpperCamelCase : Dict = ConfigTester(self , config_class=__snake_case , embed_dim=37)
def A__ ( self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
def A__ ( self):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = model_class(__snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear))
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(__snake_case)
_UpperCamelCase : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : int = [*signature.parameters.keys()]
_UpperCamelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[Any] = True
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : str = True
_UpperCamelCase : Optional[int] = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.attentions
_UpperCamelCase : Tuple = len(self.model_tester.depths)
self.assertEqual(len(__snake_case) , __snake_case)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCamelCase : str = True
_UpperCamelCase : List[Any] = config.window_size**2
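# (added note) SwinV2 computes self-attention inside local windows, so each
# attention map has shape [num_heads, window_size**2, window_size**2].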
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : int = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : str = outputs.attentions
self.assertEqual(len(__snake_case) , __snake_case)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_UpperCamelCase : Optional[int] = len(__snake_case)
# Check attention is always last and order is fine
_UpperCamelCase : int = True
_UpperCamelCase : Any = True
_UpperCamelCase : int = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Dict = model(**self._prepare_for_class(__snake_case , __snake_case))
if hasattr(self.model_tester , 'num_hidden_states_types'):
_UpperCamelCase : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_UpperCamelCase : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(__snake_case))
_UpperCamelCase : Tuple = outputs.attentions
self.assertEqual(len(__snake_case) , __snake_case)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case):
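# (added note) shared helper: runs a forward pass with hidden-state output
# enabled and checks both the sequence-shaped and the spatially reshaped
# hidden states against the patch count implied by the given image size.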
_UpperCamelCase : Dict = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Any = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : int = outputs.hidden_states
_UpperCamelCase : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(__snake_case) , __snake_case)
# Swinv2 has a different seq_length
_UpperCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCamelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
_UpperCamelCase : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(__snake_case) , __snake_case)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[Any] = reshaped_hidden_states[0].shape
_UpperCamelCase : Tuple = (
reshaped_hidden_states[0].view(__snake_case , __snake_case , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Optional[Any] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
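# (added note) round height and width up to a multiple of the patch size,
# mirroring the padding SwinV2 applies before patch embedding.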
_UpperCamelCase : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCamelCase : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Tuple = SwinvaModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : str = _config_zero_init(__snake_case)
for model_class in self.all_model_classes:
_UpperCamelCase : List[str] = model_class(config=__snake_case)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
def A__ ( self):
_UpperCamelCase : Dict = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
__snake_case)
_UpperCamelCase : Any = self.default_image_processor
_UpperCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCamelCase : Dict = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : int = model(**__snake_case)
# verify the logits
_UpperCamelCase : int = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
| 648
|
import functools
def lowerCamelCase_ ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase_ ) != 3 or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase_ ) == 0:
return 0
if min(UpperCAmelCase_ ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase_ ) >= 3_6_6:
raise ValueError('All days elements should be less than 366' )
_UpperCamelCase : Union[str, Any] = set(UpperCAmelCase_ )
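# (added note) memoized recursion over calendar days: on a travel day, take the
# cheapest of a 1-, 7-, or 30-day pass; on non-travel days, skip ahead.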
@functools.cache
def dynamic_programming(UpperCAmelCase_ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
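# A small illustrative check (hypothetical values, not part of the original file):
# with days=[1, 4, 6, 7, 8, 20] and costs=[2, 7, 15], the minimum spend is 11
# (a 1-day pass on day 1, a 7-day pass on day 4 covering days 4-10, and a
# 1-day pass on day 20: 2 + 7 + 2 = 11).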
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
| 1
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase__ = 4
lowerCAmelCase__ = 3
class lowercase ( _lowercase ):
"""simple docstring"""
pass
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
for shard in shards:
for i in range(UpperCAmelCase_ ):
yield {"i": i, "shard": shard}
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
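# (added note) meant to be launched once per process under torch.distributed:
# each rank builds the same sharded dataset, takes its node-local split, and
# verifies that the number of examples it iterates matches its expected share.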
_UpperCamelCase : Optional[Any] = int(os.environ['RANK'] )
_UpperCamelCase : str = int(os.environ['WORLD_SIZE'] )
_UpperCamelCase : List[str] = ArgumentParser()
parser.add_argument('--streaming' , type=bool )
parser.add_argument('--local_rank' , type=int )
parser.add_argument('--num_workers' , type=int , default=0 )
_UpperCamelCase : Optional[Any] = parser.parse_args()
_UpperCamelCase : Optional[int] = args.streaming
_UpperCamelCase : int = args.num_workers
_UpperCamelCase : List[str] = {'shards': [F'''shard_{shard_idx}''' for shard_idx in range(UpperCAmelCase_ )]}
_UpperCamelCase : Tuple = IterableDataset.from_generator(UpperCAmelCase_ , gen_kwargs=UpperCAmelCase_ )
if not streaming:
_UpperCamelCase : List[Any] = Dataset.from_list(list(UpperCAmelCase_ ) )
_UpperCamelCase : int = split_dataset_by_node(UpperCAmelCase_ , rank=UpperCAmelCase_ , world_size=UpperCAmelCase_ )
_UpperCamelCase : Any = torch.utils.data.DataLoader(UpperCAmelCase_ , num_workers=UpperCAmelCase_ )
_UpperCamelCase : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
_UpperCamelCase : Union[str, Any] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
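# (added note) split_dataset_by_node gives each rank floor(full_size / world_size)
# examples, with the first (full_size % world_size) ranks receiving one extra.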
_UpperCamelCase : int = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
| 648
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
| 648
| 1
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
lowerCAmelCase__ = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase__ = f'down_blocks.{i}.resnets.{j}.'
lowerCAmelCase__ = f'input_blocks.{3*i + j + 1}.0.'
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase__ = f'down_blocks.{i}.attentions.{j}.'
lowerCAmelCase__ = f'input_blocks.{3*i + j + 1}.1.'
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase__ = f'up_blocks.{i}.resnets.{j}.'
lowerCAmelCase__ = f'output_blocks.{3*i + j}.0.'
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase__ = f'up_blocks.{i}.attentions.{j}.'
lowerCAmelCase__ = f'output_blocks.{3*i + j}.1.'
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase__ = f'down_blocks.{i}.downsamplers.0.conv.'
lowerCAmelCase__ = f'input_blocks.{3*(i+1)}.0.op.'
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase__ = f'up_blocks.{i}.upsamplers.0.'
lowerCAmelCase__ = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase__ = """mid_block.attentions.0."""
lowerCAmelCase__ = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase__ = f'mid_block.resnets.{j}.'
lowerCAmelCase__ = f'middle_block.{2*j}.'
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
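# (added note) three renaming passes: direct key renames first, then the
# resnet-internal part renames, then the per-layer prefix renames, turning the
# HF Diffusers state dict into the stable-diffusion layout.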
_UpperCamelCase : List[str] = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
_UpperCamelCase : Union[str, Any] = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
_UpperCamelCase : Tuple = v.replace(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[str] = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
_UpperCamelCase : Tuple = v.replace(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = v
_UpperCamelCase : int = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase__ = f'encoder.down_blocks.{i}.resnets.{j}.'
lowerCAmelCase__ = f'encoder.down.{i}.block.{j}.'
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase__ = f'down_blocks.{i}.downsamplers.0.'
lowerCAmelCase__ = f'down.{i}.downsample.'
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase__ = f'up_blocks.{i}.upsamplers.0.'
lowerCAmelCase__ = f'up.{3-i}.upsample.'
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase__ = f'decoder.up_blocks.{i}.resnets.{j}.'
lowerCAmelCase__ = f'decoder.up.{3-i}.block.{j}.'
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase__ = f'mid_block.resnets.{i}.'
lowerCAmelCase__ = f'mid.block_{i+1}.'
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple ) -> Any:
'''simple docstring'''
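# (added note) SD checkpoints store the VAE attention projections as 1x1 convs,
# so two trailing unit dimensions are appended to the 2-D linear weight.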
return w.reshape(*w.shape , 1 , 1 )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
_UpperCamelCase : List[str] = v.replace(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Any = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
_UpperCamelCase : Dict = v.replace(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = v
_UpperCamelCase : List[str] = {v: vae_state_dict[k] for k, v in mapping.items()}
_UpperCamelCase : int = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F'''mid.attn_1.{weight_name}.weight''' in k:
print(F'''Reshaping {k} for SD format''' )
_UpperCamelCase : Any = reshape_weight_for_sd(UpperCAmelCase_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
lowerCAmelCase__ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ = {"""q""": 0, """k""": 1, """v""": 2}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Tuple = {}
_UpperCamelCase : List[Any] = {}
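# (added note) SD's CLIP text encoder keeps q/k/v as one fused in_proj tensor;
# the separate HF q_proj/k_proj/v_proj weights and biases are collected in the
# dicts above and concatenated with torch.cat once all three pieces are seen.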
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight' )
or k.endswith('.self_attn.k_proj.weight' )
or k.endswith('.self_attn.v_proj.weight' )
):
_UpperCamelCase : Dict = k[: -len('.q_proj.weight' )]
_UpperCamelCase : List[str] = k[-len('q_proj.weight' )]
if k_pre not in capture_qkv_weight:
_UpperCamelCase : Union[str, Any] = [None, None, None]
_UpperCamelCase : str = v
continue
if (
k.endswith('.self_attn.q_proj.bias' )
or k.endswith('.self_attn.k_proj.bias' )
or k.endswith('.self_attn.v_proj.bias' )
):
_UpperCamelCase : Optional[int] = k[: -len('.q_proj.bias' )]
_UpperCamelCase : Dict = k[-len('q_proj.bias' )]
if k_pre not in capture_qkv_bias:
_UpperCamelCase : Optional[Any] = [None, None, None]
_UpperCamelCase : List[str] = v
continue
_UpperCamelCase : Union[str, Any] = textenc_pattern.sub(lambda UpperCAmelCase_ : protected[re.escape(UpperCAmelCase_.group(0 ) )] , UpperCAmelCase_ )
_UpperCamelCase : int = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
_UpperCamelCase : List[Any] = textenc_pattern.sub(lambda UpperCAmelCase_ : protected[re.escape(UpperCAmelCase_.group(0 ) )] , UpperCAmelCase_ )
_UpperCamelCase : List[str] = torch.cat(UpperCAmelCase_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
_UpperCamelCase : List[str] = textenc_pattern.sub(lambda UpperCAmelCase_ : protected[re.escape(UpperCAmelCase_.group(0 ) )] , UpperCAmelCase_ )
_UpperCamelCase : Tuple = torch.cat(UpperCAmelCase_ )
return new_state_dict
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
lowerCAmelCase__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase__ = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase__ = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load each model from safetensors if it exists; otherwise fall back to the PyTorch .bin file
if osp.exists(unet_path):
lowerCAmelCase__ = load_file(unet_path, device="""cpu""")
else:
lowerCAmelCase__ = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
lowerCAmelCase__ = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
lowerCAmelCase__ = load_file(vae_path, device="""cpu""")
else:
lowerCAmelCase__ = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
lowerCAmelCase__ = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
lowerCAmelCase__ = load_file(text_enc_path, device="""cpu""")
else:
lowerCAmelCase__ = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
lowerCAmelCase__ = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
lowerCAmelCase__ = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 648
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behaves like a normal word, i.e. includes the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic the fairseq token-to-id alignment for the first 4 tokens
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
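# (added note) the C++ SentencePiece processor is not picklable, so it is
# dropped here and rebuilt from its serialized proto in __setstate__.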
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
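# (added note) XLM-R special-token layout: `<s> A </s>` for a single sequence
# and `<s> A </s></s> B </s>` for a pair.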
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A__ ( self , __snake_case):
_UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(SPIECE_UNDERLINE , ' ').strip()
return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
| 648
| 1
|